diff --git a/python/paddle/distribution/normal.py b/python/paddle/distribution/normal.py
index f28b92ec86baea10bf4f68db38c84ebfc21d34ff..b3877af277fbb96b5e949d120bc902dfdd1441a4 100644
--- a/python/paddle/distribution/normal.py
+++ b/python/paddle/distribution/normal.py
@@ -242,7 +242,7 @@ class Normal(distribution.Distribution):
         )
         return paddle.add(
             0.5 + zero_tmp,
-            0.5 * math.log(2 * math.pi) + nn.log((self.scale + zero_tmp)),
+            0.5 * math.log(2 * math.pi) + paddle.log((self.scale + zero_tmp)),
             name=name,
         )
 
@@ -260,7 +260,7 @@ class Normal(distribution.Distribution):
         value = self._check_values_dtype_in_probs(self.loc, value)
 
         var = self.scale * self.scale
-        log_scale = nn.log(self.scale)
+        log_scale = paddle.log(self.scale)
         return paddle.subtract(
             -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var),
             log_scale + math.log(math.sqrt(2.0 * math.pi)),
@@ -331,5 +331,5 @@ class Normal(distribution.Distribution):
         t1 = (self.loc - other.loc) / other.scale
         t1 = t1 * t1
         return paddle.add(
-            0.5 * var_ratio, 0.5 * (t1 - 1.0 - nn.log(var_ratio)), name=name
+            0.5 * var_ratio, 0.5 * (t1 - 1.0 - paddle.log(var_ratio)), name=name
         )
diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py
index f242dc3db0da93a6fb6540c2eb9804a7ce665d80..b9566d3c8dbc274c182570bdba07199eeacd9c6e 100644
--- a/python/paddle/distribution/uniform.py
+++ b/python/paddle/distribution/uniform.py
@@ -27,6 +27,8 @@ from paddle.fluid.layers import (
     nn,
     tensor,
 )
+
+import paddle
 from paddle.tensor import random
@@ -216,7 +218,7 @@ class Uniform(distribution.Distribution):
         if in_dygraph_mode():
             lb = _C_ops.cast(lb_bool, value.dtype)
             ub = _C_ops.cast(ub_bool, value.dtype)
-            return nn.log(lb * ub) - nn.log(self.high - self.low)
+            return paddle.log(lb * ub) - paddle.log(self.high - self.low)
 
         if _in_legacy_dygraph():
             lb = _legacy_C_ops.cast(
@@ -225,7 +227,7 @@ class Uniform(distribution.Distribution):
             ub = _legacy_C_ops.cast(
                 ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype', value.dtype
             )
-            return nn.log(lb * ub) - nn.log(self.high - self.low)
+            return paddle.log(lb * ub) - paddle.log(self.high - self.low)
 
         name = self.name + '_log_prob'
         lb_bool = self.low < value
@@ -233,7 +235,7 @@ class Uniform(distribution.Distribution):
         lb = tensor.cast(lb_bool, dtype=value.dtype)
         ub = tensor.cast(ub_bool, dtype=value.dtype)
         return paddle.subtract(
-            nn.log(lb * ub), nn.log(self.high - self.low), name=name
+            paddle.log(lb * ub), paddle.log(self.high - self.low), name=name
         )
 
     def probs(self, value):
@@ -286,4 +288,4 @@ class Uniform(distribution.Distribution):
 
         """
         name = self.name + '_entropy'
-        return nn.log(self.high - self.low, name=name)
+        return paddle.log(self.high - self.low, name=name)
diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py
index d3ca0de64b5c3e528909d24f0a218449fc407267..a54403013c69cddb23d269bbe7a94ecf49e45294 100644
--- a/python/paddle/fluid/layers/distributions.py
+++ b/python/paddle/fluid/layers/distributions.py
@@ -264,7 +264,7 @@ class Uniform(Distribution):
         ub_bool = control_flow.less_than(value, self.high)
         lb = tensor.cast(lb_bool, dtype=value.dtype)
         ub = tensor.cast(ub_bool, dtype=value.dtype)
-        return nn.log(lb * ub) - nn.log(self.high - self.low)
+        return paddle.log(lb * ub) - paddle.log(self.high - self.low)
 
     def entropy(self):
         """Shannon entropy in nats.
@@ -273,7 +273,7 @@ class Uniform(Distribution):
         Returns:
             Variable: Shannon entropy of uniform distribution.The data type is float32.
 
         """
-        return nn.log(self.high - self.low)
+        return paddle.log(self.high - self.low)
 
 
 class Normal(Distribution):
@@ -412,7 +412,9 @@ class Normal(Distribution):
             self.loc + self.scale, batch_shape, self.loc.dtype, 0.0
         )
         return (
-            0.5 + 0.5 * math.log(2 * math.pi) + nn.log((self.scale + zero_tmp))
+            0.5
+            + 0.5 * math.log(2 * math.pi)
+            + paddle.log((self.scale + zero_tmp))
         )
 
     def log_prob(self, value):
@@ -430,7 +432,7 @@ class Normal(Distribution):
         )
 
         var = self.scale * self.scale
-        log_scale = nn.log(self.scale)
+        log_scale = paddle.log(self.scale)
         return (
             -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var)
             - log_scale
@@ -454,7 +456,7 @@ class Normal(Distribution):
         var_ratio = var_ratio * var_ratio
         t1 = (self.loc - other.loc) / other.scale
         t1 = t1 * t1
-        return 0.5 * (var_ratio + t1 - 1.0 - nn.log(var_ratio))
+        return 0.5 * (var_ratio + t1 - 1.0 - paddle.log(var_ratio))
 
 
 class Categorical(Distribution):
@@ -542,7 +544,8 @@ class Categorical(Distribution):
         other_z = paddle.sum(other_e_logits, axis=-1, keepdim=True)
         prob = e_logits / z
         kl = paddle.sum(
-            prob * (logits - nn.log(z) - other_logits + nn.log(other_z)),
+            prob
+            * (logits - paddle.log(z) - other_logits + paddle.log(other_z)),
             axis=-1,
             keepdim=True,
         )
@@ -562,7 +565,7 @@ class Categorical(Distribution):
         prob = e_logits / z
 
         entropy = -1.0 * paddle.sum(
-            prob * (logits - nn.log(z)), axis=-1, keepdim=True
+            prob * (logits - paddle.log(z)), axis=-1, keepdim=True
         )
 
         return entropy
@@ -687,7 +690,7 @@ class MultivariateNormalDiag(Distribution):
         """
         entropy = 0.5 * (
             self.scale.shape[0] * (1.0 + math.log(2 * math.pi))
-            + nn.log(self._det(self.scale))
+            + paddle.log(self._det(self.scale))
         )
 
         return entropy
@@ -710,7 +713,9 @@ class MultivariateNormalDiag(Distribution):
         )
         tri_matmul = nn.matmul(loc_matmul_cov, (other.loc - self.loc))
         k = list(self.scale.shape)[0]
-        ln_cov = nn.log(self._det(other.scale)) - nn.log(self._det(self.scale))
+        ln_cov = paddle.log(self._det(other.scale)) - paddle.log(
+            self._det(self.scale)
+        )
         kl = 0.5 * (tr_cov_matmul + tri_matmul - k + ln_cov)
 
         return kl
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index b59e0c4c800e8d64b55413ef7ac7fcd077067a86..911ac5d74a339e5ba7aa63a5b37936ec0a56b613 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -97,7 +97,6 @@ __all__ = [
     'resize_trilinear',
     'resize_nearest',
     'relu',
-    'log',
     'unique',
     'unique_with_counts',
     'elementwise_add',
@@ -5246,47 +5245,6 @@ def resize_nearest(
     )
 
 
-def log(x, name=None):
-    r"""
-    Calculates the natural log of the given input tensor, element-wise.
-
-    .. math::
-
-        Out = \\ln(x)
-
-    Args:
-        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
-        name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
-
-
-    Returns:
-        Tensor: The natural log of the input Tensor computed element-wise.
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-
-            x = [[2,3,4], [7,8,9]]
-            x = paddle.to_tensor(x, dtype='float32')
-            res = paddle.log(x)
-            # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
-    """
-    if in_dygraph_mode():
-        return _C_ops.log(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
-    inputs = {'X': [x]}
-    helper = LayerHelper('log', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
 def relu(x, name=None):
     """
diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py
index e65e4b63300ea79433f5603289845ec1df0e27b5..23e61db9f47a6f520fbe2cd09e535430e9b362bd 100644
--- a/python/paddle/fluid/layers/rnn.py
+++ b/python/paddle/fluid/layers/rnn.py
@@ -1304,7 +1304,7 @@ class BeamSearchDecoder(Decoder):
             self.noend_mask_tensor, "float64"
         )
 
-        step_log_probs = nn.log(nn.softmax(logits))
+        step_log_probs = paddle.log(nn.softmax(logits))
         step_log_probs = self._mask_probs(step_log_probs, beam_state.finished)
         log_probs = nn.elementwise_add(
             x=step_log_probs, y=beam_state.log_probs, axis=0
@@ -3529,8 +3529,8 @@ def beam_search(
                 name='probs', shape=[None, 10000], dtype='float32')
             topk_scores, topk_indices = fluid.layers.topk(probs, k=beam_size)
             accu_scores = fluid.layers.elementwise_add(
-                x=fluid.layers.log(x=topk_scores),
-                y=fluid.layers.reshape(pre_scores, shape=[-1]),
+                x=paddle.log(x=topk_scores),
+                y=paddle.reshape(pre_scores, shape=[-1]),
                 axis=0)
             selected_ids, selected_scores = fluid.layers.beam_search(
                 pre_ids=pre_ids,
diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py
index 4d12648354a88a5c59e828b92ef98fdbc551a5dd..8a8b013b6b053ad344a96ac0e55a4115808a3e41 100644
--- a/python/paddle/fluid/tests/unittests/dist_transformer.py
+++ b/python/paddle/fluid/tests/unittests/dist_transformer.py
@@ -1837,7 +1837,7 @@ def fast_decode(
             input=layers.softmax(logits), k=beam_size
         )
         accu_scores = layers.elementwise_add(
-            x=layers.log(topk_scores),
+            x=paddle.log(topk_scores),
             y=paddle.reshape(pre_scores, shape=[-1]),
             axis=0,
         )
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
index cb9e92bf629cea50929129013dc2d8e104e598ee..0b5efa636afce81d7c85aa5a1be28a4b4de5108b 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
@@ -435,9 +435,7 @@ class BaseModel(fluid.dygraph.Layer):
                 cell_outputs = self._split_batch_beams(step_input)
                 cell_outputs = self.fc(cell_outputs)
 
-                step_log_probs = fluid.layers.log(
-                    fluid.layers.softmax(cell_outputs)
-                )
+                step_log_probs = paddle.log(fluid.layers.softmax(cell_outputs))
                 noend_array = [-self.kinf] * self.tar_vocab_size
                 noend_array[self.beam_end_token] = 0
                 noend_mask_tensor = to_variable(
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
index 90a7b4d35efd9d043b99fbac34493b0b9d9647ee..3773187b2596c198b4353d8b28df425ab38567b5 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -329,13 +329,11 @@ def bmn_loss_func(
         coef_0 = 0.5 * ratio / (ratio - 1)
         coef_1 = 0.5 * ratio
         epsilon = 0.000001
-        # temp = fluid.layers.log(pred_score + epsilon)
-        loss_pos = paddle.multiply(
-            fluid.layers.log(pred_score + epsilon), pmask
-        )
+        # temp = paddle.log(pred_score + epsilon)
+        loss_pos = paddle.multiply(paddle.log(pred_score + epsilon), pmask)
         loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos)
         loss_neg = paddle.multiply(
-            fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask)
+            paddle.log(1.0 - pred_score + epsilon), (1.0 - pmask)
         )
         loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg)
         loss = -1 * (loss_pos + loss_neg)
@@ -400,12 +398,10 @@ def bmn_loss_func(
         coef_0 = 0.5 * ratio / (ratio - 1)
         coef_1 = 0.5 * ratio
         epsilon = 0.000001
-        loss_pos = paddle.multiply(
-            fluid.layers.log(pred_score + epsilon), pmask
-        )
+        loss_pos = paddle.multiply(paddle.log(pred_score + epsilon), pmask)
         loss_pos = coef_1 * paddle.sum(loss_pos)
         loss_neg = paddle.multiply(
-            fluid.layers.log(1.0 - pred_score + epsilon), nmask
+            paddle.log(1.0 - pred_score + epsilon), nmask
         )
         loss_neg = coef_0 * paddle.sum(loss_neg)
         loss = -1 * (loss_pos + loss_neg) / num_entries
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
index 6423d0d6bbcbf17f43125c43237727e8e89ccfee..15e6827766a311b10e5e516b9bbbf35f32860513 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
@@ -122,7 +122,7 @@ def train(args, place, to_static):
             mask = to_variable(_mask)
             mask.stop_gradient = True
 
-            loss_probs = fluid.layers.log(loss_probs)
+            loss_probs = paddle.log(loss_probs)
             loss_probs = paddle.multiply(loss_probs, mask)
             loss_probs = paddle.sum(loss_probs, axis=-1)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
index 50d00a653170c1d5243b8dba7cc63d3191c7388d..ae7da008dfc74bb27838f657a6b82878caff1c70 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
@@ -845,7 +845,7 @@ class Transformer(Layer):
             )
             caches = map_structure(split_batch_beams, caches)
             step_log_probs = split_batch_beams(
-                fluid.layers.log(fluid.layers.softmax(logits))
+                paddle.log(fluid.layers.softmax(logits))
             )
 
             step_log_probs = mask_probs(
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 9f5bbee0fc88b08c7c601211a0d68faf6c4ad4e1..2479312a51ef54f9c51d2b167b49b1f022f71338 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -2417,8 +2417,8 @@ class TestLog(TestActivation):
             name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
         )
 
-        self.assertRaises(TypeError, fluid.layers.log, in1)
-        self.assertRaises(TypeError, fluid.layers.log, in2)
+        self.assertRaises(TypeError, paddle.log, in1)
+        self.assertRaises(TypeError, paddle.log, in2)
 
 
 class TestLog_ZeroDim(TestLog):
diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py
index 0d44764e2da1bfb8a9974e9c690a78c171716c72..bc737a5ed55f4fb29645900fa2bceff3cdc4b640 100644
--- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py
+++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py
@@ -314,7 +314,7 @@ class TestBeamSearchOpError(unittest.TestCase):
         probs = fluid.data(name='probs', shape=[10000], dtype='float32')
         topk_scores, topk_indices = fluid.layers.topk(probs, k=4)
         accu_scores = fluid.layers.elementwise_add(
-            x=fluid.layers.log(x=topk_scores),
+            x=paddle.log(x=topk_scores),
             y=paddle.reshape(pre_scores, shape=[-1]),
             axis=0,
         )
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py
index bea24aa27393275e43aae7a976ae72db1c118b54..2a2d2ef905331730d63fb821226f218de206d488 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py
@@ -71,7 +71,7 @@ class TestImperativeMnist(unittest.TestCase):
            dy_mask = fluid.dygraph.base.to_variable(mask)
            dy_mask.stop_gradient = True
 
-            loss_probs = fluid.layers.log(loss_probs)
+            loss_probs = paddle.log(loss_probs)
             loss_probs = fluid.layers.elementwise_mul(loss_probs, dy_mask)
             loss_probs = paddle.sum(loss_probs, axis=-1)
@@ -139,7 +139,7 @@ class TestImperativeMnist(unittest.TestCase):
 
             st_loss_probs = policy(st_state)
 
-            st_loss_probs = fluid.layers.log(st_loss_probs)
+            st_loss_probs = paddle.log(st_loss_probs)
             st_loss_probs = fluid.layers.elementwise_mul(st_loss_probs, st_mask)
             st_loss_probs = paddle.sum(st_loss_probs, axis=-1)
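
Reviewer note: a minimal before/after sketch of the call-site migration this patch performs at every site above. The input values and the expected output are taken verbatim from the docstring of the `log` function removed from `python/paddle/fluid/layers/nn.py`; `paddle.log` computes the same element-wise natural log.

```python
import paddle

x = paddle.to_tensor([[2, 3, 4], [7, 8, 9]], dtype='float32')

# Before this patch (API removed here):
#   res = fluid.layers.log(x)
# After this patch:
res = paddle.log(x)  # element-wise natural log
# [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
```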