Unverified · Commit 208f625b · Authored by JYChen · Committed by GitHub

[Fluid Clean] remove apis in fluid.layers.ops (#47867)

* remove apis in fluid.ops

* fix test_activation_nn_grad

* fix circle import error

* fix ops

* fix cos

* fix divide not inplace

* remove lazy-import part
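
Every hunk below applies the same mechanical migration: the thin wrappers that used to live in `fluid.layers.ops` are replaced by their public `paddle` equivalents. A minimal sketch of the mapping, using real paddle 2.x APIs with made-up input values:

import paddle

x = paddle.to_tensor([0.25, 1.0, 4.0])

# Element-wise math ops move to the root namespace:
#   fluid.layers.sqrt/exp/floor/ceil/cos/abs/square -> paddle.sqrt/exp/floor/...
y = paddle.sqrt(x)
z = paddle.exp(x)

# Activation wrappers move to paddle.nn.functional:
#   fluid.layers.sigmoid/softsign/gelu -> paddle.nn.functional.sigmoid/softsign/gelu
a = paddle.nn.functional.sigmoid(x)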
Parent: 70589379
@@ -449,8 +449,8 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase):
             communicate()
             self._generate_avg_loss(main_block, loss, avg_loss)
             next_local_steps = layers.cast(
-                layers.ceil(
-                    layers.sqrt(
+                paddle.ceil(
+                    paddle.sqrt(
                         lr_0
                         * avg_loss
                         / (global_lr * loss_0)
...
@@ -68,7 +68,7 @@ class GroupShardedClipGrad:
                 merge_grad = layers.get_tensor_from_selected_rows(
                     layers.merge_selected_rows(g)
                 )
-                square = layers.square(merge_grad)
+                square = paddle.square(merge_grad)
                 sum_square = layers.reduce_sum(square)

                 if p.dtype == paddle.float16:
@@ -133,7 +133,7 @@ class GroupShardedClipGrad:
         with device_guard(dev_id, "gpu"):
             paddle.distributed.all_reduce(global_norm_var, group=self._group)

-        global_norm_var = layers.sqrt(global_norm_var)
+        global_norm_var = paddle.sqrt(global_norm_var)
         max_global_norm = layers.fill_constant(
             shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm
         )
...
@@ -69,7 +69,7 @@ class ShardingClipGrad:
                 merge_grad = layers.get_tensor_from_selected_rows(
                     layers.merge_selected_rows(g)
                 )
-                square = layers.square(merge_grad)
+                square = paddle.square(merge_grad)
                 sum_square = layers.reduce_sum(square)

                 if p.dtype == paddle.float16:
@@ -131,7 +131,7 @@ class ShardingClipGrad:
         with device_guard(dev_id, "gpu"):
             paddle.distributed.all_reduce(global_norm_var, group=self._group)

-        global_norm_var = layers.sqrt(global_norm_var)
+        global_norm_var = paddle.sqrt(global_norm_var)
         max_global_norm = layers.fill_constant(
             shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm
         )
...
@@ -17,7 +17,7 @@ import paddle
 from paddle.distribution import distribution
 from paddle.fluid.data_feeder import check_type, convert_dtype
 from paddle.fluid.framework import _non_static_mode
-from paddle.fluid.layers import ops, tensor
+from paddle.fluid.layers import tensor
 from paddle.tensor import multinomial
@@ -214,8 +214,8 @@ class Categorical(distribution.Distribution):
         other_logits = other.logits - paddle.max(
             other.logits, axis=-1, keepdim=True
         )
-        e_logits = ops.exp(logits)
-        other_e_logits = ops.exp(other_logits)
+        e_logits = paddle.exp(logits)
+        other_e_logits = paddle.exp(other_logits)
         z = paddle.sum(e_logits, axis=-1, keepdim=True)
         other_z = paddle.sum(other_e_logits, axis=-1, keepdim=True)
         prob = e_logits / z
@@ -255,7 +255,7 @@ class Categorical(distribution.Distribution):
         """
         name = self.name + '_entropy'
         logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True)
-        e_logits = ops.exp(logits)
+        e_logits = paddle.exp(logits)
         z = paddle.sum(e_logits, axis=-1, keepdim=True)
         prob = e_logits / z
...
@@ -23,7 +23,6 @@ from paddle.fluid.layers import (
     elementwise_div,
     elementwise_sub,
     nn,
-    ops,
     tensor,
 )
@@ -288,7 +287,7 @@ class Normal(distribution.Distribution):
         var = self.scale * self.scale
         return elementwise_div(
-            ops.exp(
+            paddle.exp(
                 -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var)
             ),
             (math.sqrt(2 * math.pi) * self.scale),
...
@@ -72,7 +72,7 @@ def _squared_l2_norm(x):
         or x.dtype == core.VarDesc.VarType.FP16
         or x.dtype == core.VarDesc.VarType.BF16
     ):
-        square = layers.square(x)
+        square = paddle.square(x)
         sum_square = layers.reduce_sum(square)
         return sum_square
@@ -540,7 +540,7 @@ class ClipGradByGlobalNorm(ClipGradBase):
             global_norm_var_fp64 = paddle.add_n(sum_square_list)
             global_norm_var.append(global_norm_var_fp64)
         global_norm_var = paddle.add_n(global_norm_var)
-        global_norm_var = layers.sqrt(global_norm_var)
+        global_norm_var = paddle.sqrt(global_norm_var)
         max_global_norm = layers.fill_constant(
             shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm
         )
@@ -648,7 +648,7 @@ class ClipGradByGlobalNorm(ClipGradBase):
             if len(global_norm_var) > 1
             else global_norm_var[0]
         )
-        global_norm_var = layers.sqrt(x=global_norm_var)
+        global_norm_var = paddle.sqrt(x=global_norm_var)
         max_global_norm = layers.fill_constant(
             shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm
         )
@@ -727,7 +727,7 @@ class ClipGradByGlobalNorm(ClipGradBase):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
-            group_norm_var = layers.sqrt(x=group_norm_var)
+            group_norm_var = paddle.sqrt(x=group_norm_var)
             clip_var = self.context[self.group_name + "_clip"]
             group_scale_var = layers.elementwise_div(
                 x=clip_var,
...
@@ -14,6 +14,7 @@
 import copy

+import paddle
 from paddle.fluid import layers, unique_name
 from paddle.fluid.dygraph import Layer
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
@@ -95,8 +96,8 @@ class BasicGRUUnit(Layer):
         self._hiden_size = hidden_size
         self._param_attr = param_attr
         self._bias_attr = bias_attr
-        self._gate_activation = gate_activation or layers.sigmoid
-        self._activation = activation or layers.tanh
+        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
+        self._activation = activation or paddle.tanh
         self._dtype = dtype

     def _build_once(self, input, pre_hidden):
@@ -845,8 +846,8 @@ class BasicLSTMUnit(Layer):
         self._hiden_size = hidden_size
         self._param_attr = param_attr
         self._bias_attr = bias_attr
-        self._gate_activation = gate_activation or layers.sigmoid
-        self._activation = activation or layers.tanh
+        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
+        self._activation = activation or paddle.tanh
         self._forget_bias = layers.fill_constant(
             [1], dtype=dtype, value=forget_bias
         )
@@ -879,10 +880,14 @@ class BasicLSTMUnit(Layer):
         new_cell = layers.elementwise_add(
             layers.elementwise_mul(
                 pre_cell,
-                layers.sigmoid(layers.elementwise_add(f, self._forget_bias)),
+                paddle.nn.functional.sigmoid(
+                    layers.elementwise_add(f, self._forget_bias)
+                ),
             ),
-            layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)),
+            layers.elementwise_mul(
+                paddle.nn.functional.sigmoid(i), paddle.tanh(j)
+            ),
         )
-        new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
+        new_hidden = paddle.tanh(new_cell) * paddle.nn.functional.sigmoid(o)

         return new_hidden, new_cell
...
@@ -17,6 +17,7 @@ import time
 import sys
 import logging

+import paddle
 import paddle.fluid as fluid

 from ....log_helper import get_logger
@@ -41,7 +42,9 @@ ZETA = 1.1
 def compute_soft_rounding(alpha_v):
     return fluid.layers.clip(
-        fluid.layers.sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA, min=0, max=1
+        paddle.nn.functional.sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA,
+        min=0,
+        max=1,
     )
@@ -73,8 +76,7 @@ class AdaRoundLoss:
         # calculate regularization term - which ensures parameter to converge to exactly zeros and ones
         # at the end of optimization
         reg_term = fluid.layers.reduce_sum(
-            -fluid.layers.pow(fluid.layers.abs(2 * h_v - 1), factor=beta)
-            + 1
+            -fluid.layers.pow(paddle.abs(2 * h_v - 1), factor=beta) + 1
         )

         # calculate the rounding loss
...
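For context, `compute_soft_rounding` above is AdaRound's rectified sigmoid, h(alpha) = clip(sigmoid(alpha) * (ZETA - GAMMA) + GAMMA, 0, 1); the regularization term then pushes h(alpha) toward exactly 0 or 1. A standalone numeric sketch (ZETA is taken from the hunk; GAMMA = -0.1 is assumed from the AdaRound formulation, and the alpha values are made up):

import paddle

GAMMA, ZETA = -0.1, 1.1  # stretch constants; ZETA from the hunk, GAMMA assumed

alpha = paddle.to_tensor([-4.0, 0.0, 4.0])
h = paddle.clip(
    paddle.nn.functional.sigmoid(alpha) * (ZETA - GAMMA) + GAMMA, min=0.0, max=1.0
)
print(h.numpy())  # ~[0.0, 0.5, 1.0]: saturates to hard 0/1 at the extremes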
@@ -82,7 +82,7 @@ def bow_net(
         input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
     )
     bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
-    bow_tanh = fluid.layers.tanh(bow)
+    bow_tanh = paddle.tanh(bow)
     fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
     fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
     prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
...
@@ -270,12 +270,10 @@ class NaturalExpDecay(LearningRateDecay):
         self.staircase = staircase

     def step(self):
-        from .. import layers
-
         div_res = self.create_lr_var(self.step_num / self.decay_steps)
         if self.staircase:
-            div_res = layers.floor(div_res)
-        decayed_lr = self.learning_rate * layers.exp(
+            div_res = paddle.floor(div_res)
+        decayed_lr = self.learning_rate * paddle.exp(
             -1 * self.decay_rate * div_res
         )
@@ -356,11 +354,9 @@ class ExponentialDecay(LearningRateDecay):
         self.staircase = staircase

     def step(self):
-        from .. import layers
-
         div_res = self.create_lr_var(self.step_num / self.decay_steps)
         if self.staircase:
-            div_res = layers.floor(div_res)
+            div_res = paddle.floor(div_res)
         decayed_lr = self.learning_rate * (self.decay_rate**div_res)
@@ -437,11 +433,9 @@ class InverseTimeDecay(LearningRateDecay):
         self.staircase = staircase

     def step(self):
-        from .. import layers
-
         div_res = self.create_lr_var(self.step_num / self.decay_steps)
         if self.staircase:
-            div_res = layers.floor(div_res)
+            div_res = paddle.floor(div_res)
         decayed_lr = self.learning_rate / (1 + self.decay_rate * div_res)
@@ -524,12 +518,10 @@ class PolynomialDecay(LearningRateDecay):
         self.cycle = cycle

     def step(self):
-        from .. import layers
-
         tmp_step_num = self.step_num
         tmp_decay_steps = self.decay_steps
         if self.cycle:
-            div_res = layers.ceil(
+            div_res = paddle.ceil(
                 self.create_lr_var(tmp_step_num / float(self.decay_steps))
             )
@@ -601,15 +593,13 @@ class CosineDecay(LearningRateDecay):
         self.epochs = epochs

     def step(self):
-        from .. import layers
-
-        cur_epoch = layers.floor(
+        cur_epoch = paddle.floor(
             self.create_lr_var(self.step_num / self.step_each_epoch)
         )
         decayed_lr = (
             self.learning_rate
             * 0.5
-            * (layers.cos(cur_epoch * math.pi / self.epochs) + 1)
+            * (paddle.cos(cur_epoch * math.pi / self.epochs) + 1)
         )
         return decayed_lr
...
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import paddle
 from . import Layer
 from ..layers import (
-    sigmoid,
-    tanh,
     concat,
     fill_constant,
     matmul,
@@ -139,8 +138,8 @@ class LSTMCell(Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._dtype = dtype
-        self._gate_activation = gate_activation or sigmoid
-        self._activation = activation or tanh
+        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
+        self._activation = activation or paddle.tanh
         self._use_cudnn_impl = use_cudnn_impl

         if self._use_cudnn_impl:
@@ -254,7 +253,9 @@ class LSTMCell(Layer):
                     elementwise_add(f, self._forget_bias)
                 ),
             ),
-            elementwise_mul(sigmoid(i), tanh(j)),
+            elementwise_mul(
+                paddle.nn.functional.sigmoid(i), paddle.tanh(j)
+            ),
         )
         new_hidden = self._activation(new_cell) * self._gate_activation(o)
@@ -357,8 +358,8 @@ class GRUCell(Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._dtype = dtype
-        self._gate_activation = gate_activation or sigmoid
-        self._activation = activation or tanh
+        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
+        self._activation = activation or paddle.tanh
         self._use_cudnn_impl = use_cudnn_impl

         if self._use_cudnn_impl:
...
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from . import ops
-from .ops import *
 from . import nn
 from .nn import *
 from . import io
@@ -43,7 +41,6 @@ __all__ += nn.__all__
 __all__ += io.__all__
 __all__ += tensor.__all__
 __all__ += control_flow.__all__
-__all__ += ops.__all__
 __all__ += device.__all__
 __all__ += detection.__all__
 __all__ += metric_op.__all__
...
@@ -14,7 +14,7 @@
 from ..wrapped_decorator import signature_safe_contextmanager
-from .layer_function_generator import autodoc, templatedoc
+from .layer_function_generator import templatedoc
 from .tensor import assign, cast, fill_constant
 from .. import core
 from ..framework import (
...
@@ -17,15 +17,13 @@ All layers just related to the detection neural network.
 import paddle
-from .layer_function_generator import generate_layer_fn
-from .layer_function_generator import autodoc, templatedoc
+from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
 from ..framework import Variable, _non_static_mode, static_only, in_dygraph_mode
 from .. import core
 from .loss import softmax_with_cross_entropy
 from . import tensor
 from . import nn
-from . import ops
 from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
 import math
 import numpy as np
...
@@ -14,7 +14,6 @@
 from . import control_flow
 from . import tensor
-from . import ops
 from . import nn
 import math
 import numpy as np
@@ -535,8 +534,8 @@ class Categorical(Distribution):
         other_logits = other.logits - nn.reduce_max(
             other.logits, dim=-1, keep_dim=True
         )
-        e_logits = ops.exp(logits)
-        other_e_logits = ops.exp(other_logits)
+        e_logits = paddle.exp(logits)
+        other_e_logits = paddle.exp(other_logits)
         z = nn.reduce_sum(e_logits, dim=-1, keep_dim=True)
         other_z = nn.reduce_sum(other_e_logits, dim=-1, keep_dim=True)
         prob = e_logits / z
@@ -556,7 +555,7 @@ class Categorical(Distribution):
         """
         logits = self.logits - nn.reduce_max(self.logits, dim=-1, keep_dim=True)
-        e_logits = ops.exp(logits)
+        e_logits = paddle.exp(logits)
         z = nn.reduce_sum(e_logits, dim=-1, keep_dim=True)
         prob = e_logits / z
         entropy = -1.0 * nn.reduce_sum(
...
@@ -19,7 +19,6 @@ import threading
 from ..data_feeder import DataFeeder
 from .control_flow import BlockGuard
-from .layer_function_generator import templatedoc
 from .. import core
 from ..executor import global_scope
 from ..framework import (
...
@@ -45,14 +45,11 @@ __all__ = [
 def _convert_(name):
     """
     Formatting.
-
     Args:
        name: The name/alias
-
     This function takes in a name and converts it to a standard format of
     group1_group2. Where as per the regular expression, group1 can have
     alphabets and numbers and group2 has capital alphabets.
-
     """
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
     return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@@ -80,10 +77,8 @@ def _generate_doc_string_(
 ):
     """
     Generate docstring by OpProto
-
     Args:
         op_proto (framework_pb2.OpProto): a protobuf message typed OpProto
-
     Returns:
         str: the document string
     """
@@ -148,13 +143,10 @@ def _generate_doc_string_(
 def generate_layer_fn(op_type):
     """Register the Python layer for an Operator.
-
     Args:
        op_type: The name of the operator to be created.
-
     This function takes in the operator type (sigmoid, mean , average etc) and
     creates the operator functionality.
-
     """
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)

     not_intermediate_outputs = [
@@ -271,13 +263,10 @@ def generate_layer_fn(op_type):
 def generate_activation_fn(op_type):
     """Register the Python layer for an Operator without Attribute.
-
     Args:
        op_type: The name of the operator to be created.
-
     This function takes in the operator type (sigmoid, exp , tanh etc) and
     creates the operator functionality.
-
     """
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)
@@ -330,10 +319,8 @@ def generate_activation_fn(op_type):
 def generate_inplace_fn(inplace_op_type):
     """Register the Python layer for an Inplace Operator without Attribute.
-
     Args:
        inplace_op_type: The name of the inplace operator to be created.
-
     This function takes in the inplace operator type (exp_ , ceil_ etc) and
     creates the operator functionality.
     """
@@ -378,12 +365,10 @@ def templatedoc(op_type=None):
     """
     Decorator of layer function. It will use the docstring from the layer
     function as the template. The template arguments are:
-
     * ${comment}: The operator comment written in CPP.
     * ${{name}_comment}: The comment of ${name} written with AddAttr, AddOutput,
       and AddInput. The ${name} is Python snake style. i.e., xxx_xxx.
-
     Returns:
         Decorated function.
     """
@@ -438,7 +423,6 @@ def templatedoc(op_type=None):
 def add_sample_code(func, sample_code):
     """
     Append sample code for dynamically generated functions.
-
     Args:
         func: The function of the function to be append sample code to.
         sample_code: sample code session in rst format.
...
@@ -26,7 +26,6 @@ import numbers
 import paddle
 from . import control_flow
 from . import nn
-from . import ops
 from . import tensor
 from ..framework import default_main_program, Parameter, unique_name, name_scope
 from ..framework import Variable
@@ -171,7 +170,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
         div_res = global_step / decay_steps
         if staircase:
-            div_res = ops.floor(div_res)
+            div_res = paddle.floor(div_res)
         decayed_lr = learning_rate * (decay_rate**div_res)

         return decayed_lr
@@ -233,8 +232,8 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
         div_res = global_step / decay_steps
         if staircase:
-            div_res = ops.floor(div_res)
-        decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
+            div_res = paddle.floor(div_res)
+        decayed_lr = learning_rate * paddle.exp(-1 * decay_rate * div_res)

         return decayed_lr
@@ -293,7 +292,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
         div_res = global_step / decay_steps
         if staircase:
-            div_res = ops.floor(div_res)
+            div_res = paddle.floor(div_res)
         decayed_lr = learning_rate / (1 + decay_rate * div_res)
@@ -347,7 +346,7 @@ def polynomial_decay(
     global_step = _decay_step_counter()

     if cycle:
-        div_res = ops.ceil(global_step / decay_steps)
+        div_res = paddle.ceil(global_step / decay_steps)
         zero_var = tensor.fill_constant(
             shape=[1], dtype='float32', value=0.0
         )
@@ -497,11 +496,11 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
     else:
         global_step = _decay_step_counter()

-        cur_epoch = ops.floor(global_step / step_each_epoch)
+        cur_epoch = paddle.floor(global_step / step_each_epoch)
         decayed_lr = (
             learning_rate
             * 0.5
-            * (ops.cos(cur_epoch * math.pi / epochs) + 1)
+            * (paddle.cos(cur_epoch * math.pi / epochs) + 1)
         )
         return decayed_lr
...
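The cosine schedule above now reads decayed_lr = lr * 0.5 * (cos(pi * floor(step / step_each_epoch) / epochs) + 1) in plain `paddle` ops. A hedged eager-mode sketch with made-up hyperparameters, just to show the arithmetic:

import math

import paddle

learning_rate, step_each_epoch, epochs = 0.1, 100, 10
global_step = paddle.to_tensor(250.0)

cur_epoch = paddle.floor(global_step / step_each_epoch)  # -> 2.0
decayed_lr = learning_rate * 0.5 * (paddle.cos(cur_epoch * math.pi / epochs) + 1)
print(float(decayed_lr))  # ~0.0905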
@@ -1737,7 +1737,6 @@ def kldiv_loss(x, target, reduction='mean', name=None):
     return loss

-from .ops import square
 from .control_flow import equal
...
This diff is collapsed.
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import paddle
 from . import layers
 from .data_feeder import check_variable_and_dtype, convert_dtype
 from ..utils import deprecated
@@ -387,7 +388,7 @@ def glu(input, dim=-1):
         input, 'input', ['float16', 'float32', 'float64'], "glu"
     )
     a, b = layers.split(input, num_or_sections=2, dim=dim)
-    act_b = layers.sigmoid(x=b)
+    act_b = paddle.nn.functional.sigmoid(x=b)
     out = layers.elementwise_mul(x=a, y=act_b)
     return out
...
@@ -48,7 +48,6 @@ from .clip import (
 from .framework import program_guard
 from .initializer import Constant
 from .layer_helper import LayerHelper
-from .layers import ops
 from .dygraph import base as imperative_base
 from .dygraph import no_grad
 from .dygraph.learning_rate_scheduler import (
@@ -4457,7 +4456,7 @@ class ModelAverage(Optimizer):
         sum = layers.cast(
             x=sum, dtype='float32' if self._dtype is None else self._dtype
         )
-        ops._elementwise_div(x=sum, y=tmp, out=param)
+        paddle.assign(paddle.divide(sum, tmp), output=param)

     def _add_average_restore_op(self, block, param_grad):
         param = block._clone_variable(param_grad[0])
...
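The `ModelAverage` change is the "fix divide not inplace" item from the commit message: unlike the removed `ops._elementwise_div(..., out=param)`, `paddle.divide` has no out-parameter and returns a fresh tensor, so the quotient is written back into `param` via `paddle.assign(..., output=...)`. A sketch of the equivalence with made-up tensors:

import paddle

param = paddle.to_tensor([0.0, 0.0])  # destination buffer, stands in for `param`
total = paddle.to_tensor([6.0, 8.0])  # stands in for the accumulated `sum`
tmp = paddle.to_tensor([3.0, 2.0])    # stands in for the step counter `tmp`

# paddle.divide allocates a new result; assign copies it into the existing
# variable, preserving the in-place semantics the removed op provided.
paddle.assign(paddle.divide(total, tmp), output=param)
print(param.numpy())  # [2. 4.]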
@@ -70,21 +70,21 @@ def dyn_rnn_lstm(
             gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
             return gate0 + gate1

-        forget_gate = fluid.layers.sigmoid(
+        forget_gate = paddle.nn.functional.sigmoid(
             x=gate_common(word, prev_hidden, lstm_size)
         )
-        input_gate = fluid.layers.sigmoid(
+        input_gate = paddle.nn.functional.sigmoid(
             x=gate_common(word, prev_hidden, lstm_size)
         )
-        output_gate = fluid.layers.sigmoid(
+        output_gate = paddle.nn.functional.sigmoid(
             x=gate_common(word, prev_hidden, lstm_size)
         )
-        cell_gate = fluid.layers.sigmoid(
+        cell_gate = paddle.nn.functional.sigmoid(
             x=gate_common(word, prev_hidden, lstm_size)
         )

         cell = forget_gate * prev_cell + input_gate * cell_gate
-        hidden = output_gate * fluid.layers.tanh(x=cell)
+        hidden = output_gate * paddle.tanh(x=cell)
         rnn.update_memory(prev_cell, cell)
         rnn.update_memory(prev_hidden, hidden)
         rnn.output(hidden)
...
@@ -70,10 +70,10 @@ def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
     def linear(inputs):
         return fluid.layers.fc(input=inputs, size=size, bias_attr=True)

-    forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
-    input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
-    output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
-    cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))
+    forget_gate = paddle.nn.functional.sigmoid(x=linear([hidden_t_prev, x_t]))
+    input_gate = paddle.nn.functional.sigmoid(x=linear([hidden_t_prev, x_t]))
+    output_gate = paddle.nn.functional.sigmoid(x=linear([hidden_t_prev, x_t]))
+    cell_tilde = paddle.tanh(x=linear([hidden_t_prev, x_t]))

     cell_t = fluid.layers.sums(
         input=[
@@ -83,7 +83,7 @@ def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
     )

     hidden_t = fluid.layers.elementwise_mul(
-        x=output_gate, y=fluid.layers.tanh(x=cell_t)
+        x=output_gate, y=paddle.tanh(x=cell_t)
     )

     return hidden_t, cell_t
...
@@ -175,12 +175,12 @@ class TestIfElse(unittest.TestCase):
             ie = layers.IfElse(ifcond)
             with ie.true_block():
                 true_target = ie.input(src)
-                true_target = fluid.layers.exp(true_target)
+                true_target = paddle.exp(true_target)
                 ie.output(true_target)

             with ie.false_block():
                 false_target = ie.input(src)
-                false_target = fluid.layers.tanh(false_target)
+                false_target = paddle.tanh(false_target)
                 ie.output(false_target)
             if_out = ie()
             out = layers.reduce_sum(if_out[0])
@@ -244,7 +244,7 @@ class TestIfElseError(unittest.TestCase):
             ie = layers.IfElse(ifcond)
             with ie.true_block():
                 true_target = ie.input(src)
-                true_target = fluid.layers.exp(true_target)
+                true_target = paddle.exp(true_target)
                 ie.output([])
...
@@ -130,7 +130,7 @@ def train_network(
     q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
     # vsum
     q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
-    q_ss = fluid.layers.softsign(q_sum)
+    q_ss = paddle.nn.functional.softsign(q_sum)
     # fc layer after conv
     q_fc = fluid.layers.fc(
         input=q_ss,
@@ -157,7 +157,7 @@ def train_network(
     pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
     # vsum
     pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
-    pt_ss = fluid.layers.softsign(pt_sum)
+    pt_ss = paddle.nn.functional.softsign(pt_sum)
     # fc layer
     pt_fc = fluid.layers.fc(
         input=pt_ss,
@@ -181,7 +181,7 @@ def train_network(
     nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
     # vsum
     nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
-    nt_ss = fluid.layers.softsign(nt_sum)
+    nt_ss = paddle.nn.functional.softsign(nt_sum)
     # fc layer
     nt_fc = fluid.layers.fc(
         input=nt_ss,
...
@@ -61,7 +61,7 @@ def dyfunc_with_if_else2(x, col=100):
         y = fluid.layers.relu(x)
     else:
         x_pow = fluid.layers.pow(x, 2)
-        y = fluid.layers.tanh(x_pow)
+        y = paddle.tanh(x_pow)
     return y
@@ -161,7 +161,7 @@ def nested_if_else(x_v):
             tmp = y * w
             y = fluid.layers.relu(tmp)
             if paddle.mean(y).numpy()[0] < batch_size:
-                y = fluid.layers.abs(y)
+                y = paddle.abs(y)
             else:
                 tmp = fluid.layers.fill_constant(
                     y.shape, dtype='float32', value=-1
@@ -276,7 +276,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
             self.constant_vars['w'] = fluid.layers.fill_constant(
                 [hidden_dim], dtype='float32', value=9
             )
-            y = fluid.layers.abs(y)
+            y = paddle.abs(y)
         else:
             tmp = fluid.layers.fill_constant(
                 y.shape, dtype='float32', value=-1
...
@@ -49,8 +49,8 @@ class BasicLSTMUnit(Layer):
         self._hiden_size = hidden_size
         self._param_attr = param_attr
         self._bias_attr = bias_attr
-        self._gate_activation = gate_activation or layers.sigmoid
-        self._activation = activation or layers.tanh
+        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
+        self._activation = activation or paddle.tanh
         self._forget_bias = forget_bias
         self._dtype = dtype
         self._input_size = input_size
@@ -76,12 +76,14 @@ class BasicLSTMUnit(Layer):
         i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)

         new_cell = layers.elementwise_add(
             layers.elementwise_mul(
-                pre_cell, layers.sigmoid(f + self._forget_bias)
+                pre_cell, paddle.nn.functional.sigmoid(f + self._forget_bias)
             ),
-            layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)),
+            layers.elementwise_mul(
+                paddle.nn.functional.sigmoid(i), paddle.tanh(j)
+            ),
         )
-        new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
+        new_hidden = paddle.tanh(new_cell) * paddle.nn.functional.sigmoid(o)

         return new_hidden, new_cell
...
@@ -13,7 +13,6 @@
 # limitations under the License.

 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.param_attr as attr
@@ -232,7 +231,7 @@ class SoftsignLayer:
         """
         operation
         """
-        softsign = fluid.layers.softsign(input)
+        softsign = paddle.nn.functional.softsign(input)
         return softsign
...
@@ -89,28 +89,22 @@ class Cycle_Gan(fluid.dygraph.Layer):
         cyc_A = self.build_generator_resnet_9blocks_b(fake_B)
         cyc_B = self.build_generator_resnet_9blocks_a(fake_A)

-        diff_A = fluid.layers.abs(
-            fluid.layers.elementwise_sub(x=input_A, y=cyc_A)
-        )
-        diff_B = fluid.layers.abs(
-            fluid.layers.elementwise_sub(x=input_B, y=cyc_B)
-        )
+        diff_A = paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=cyc_A))
+        diff_B = paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=cyc_B))
         cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A
         cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B
         cyc_loss = cyc_A_loss + cyc_B_loss

         fake_rec_A = self.build_gen_discriminator_a(fake_B)
-        g_A_loss = fluid.layers.reduce_mean(fluid.layers.square(fake_rec_A - 1))
+        g_A_loss = paddle.mean(paddle.square(fake_rec_A - 1))

         fake_rec_B = self.build_gen_discriminator_b(fake_A)
-        g_B_loss = fluid.layers.reduce_mean(fluid.layers.square(fake_rec_B - 1))
+        g_B_loss = paddle.mean(paddle.square(fake_rec_B - 1))
         G = g_A_loss + g_B_loss

         idt_A = self.build_generator_resnet_9blocks_a(input_B)
         idt_loss_A = (
             fluid.layers.reduce_mean(
-                fluid.layers.abs(
-                    fluid.layers.elementwise_sub(x=input_B, y=idt_A)
-                )
+                paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=idt_A))
             )
             * lambda_B
             * lambda_identity
@@ -119,9 +113,7 @@ class Cycle_Gan(fluid.dygraph.Layer):
         idt_B = self.build_generator_resnet_9blocks_b(input_A)
         idt_loss_B = (
             fluid.layers.reduce_mean(
-                fluid.layers.abs(
-                    fluid.layers.elementwise_sub(x=input_A, y=idt_B)
-                )
+                paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=idt_B))
             )
             * lambda_A
             * lambda_identity
@@ -271,7 +263,7 @@ class build_generator_resnet_9blocks(fluid.dygraph.Layer):
         y = self.deconv1(y)
         y = fluid.layers.pad2d(y, [3, 3, 3, 3], mode="reflect")
         y = self.conv3(y)
-        y = fluid.layers.tanh(y)
+        y = paddle.tanh(y)
         return y
@@ -647,8 +639,7 @@ def train(args, to_static):
                 data_B, fake_pool_B
             )
             d_loss_A = (
-                fluid.layers.square(fake_pool_rec_B)
-                + fluid.layers.square(rec_B - 1)
+                paddle.square(fake_pool_rec_B) + paddle.square(rec_B - 1)
             ) / 2.0
             d_loss_A = fluid.layers.reduce_mean(d_loss_A)
@@ -661,8 +652,7 @@ def train(args, to_static):
                 data_A, fake_pool_A
             )
             d_loss_B = (
-                fluid.layers.square(fake_pool_rec_A)
-                + fluid.layers.square(rec_A - 1)
+                paddle.square(fake_pool_rec_A) + paddle.square(rec_A - 1)
             ) / 2.0
             d_loss_B = fluid.layers.reduce_mean(d_loss_B)
...
@@ -99,10 +99,10 @@ class SimpleLSTMRNN(fluid.Layer):
             i, j, f, o = fluid.layers.split(
                 gate_input, num_or_sections=4, dim=-1
             )
-            c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
-                i
-            ) * fluid.layers.tanh(j)
-            m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
+            c = pre_cell * paddle.nn.functional.sigmoid(
+                f
+            ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
+            m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
             hidden_array[k] = m
             cell_array[k] = c
             step_input = m
...
@@ -145,7 +145,7 @@ class BOW(fluid.dygraph.Layer):
         emb = emb * mask_emb
         emb = fluid.layers.reshape(emb, shape=[-1, self.seq_len, self.hid_dim])
         bow_1 = fluid.layers.reduce_sum(emb, dim=1)
-        bow_1 = fluid.layers.tanh(bow_1)
+        bow_1 = paddle.tanh(bow_1)
         fc_1 = self._fc1(bow_1)
         fc_2 = self._fc2(fc_1)
         prediction = self._fc_prediction(fc_2)
@@ -197,7 +197,7 @@ class GRU(fluid.dygraph.Layer):
         fc_1 = self._fc1(emb)
         gru_hidden = self._gru(fc_1)
         gru_hidden = fluid.layers.reduce_max(gru_hidden, dim=1)
-        tanh_1 = fluid.layers.tanh(gru_hidden)
+        tanh_1 = paddle.tanh(gru_hidden)
         fc_2 = self._fc2(tanh_1)
         prediction = self._fc_prediction(fc_2)
@@ -253,8 +253,8 @@ class BiGRU(fluid.dygraph.Layer):
         fc_1 = self._fc1(emb)
         gru_forward = self._gru_forward(fc_1)
         gru_backward = self._gru_backward(fc_1)
-        gru_forward_tanh = fluid.layers.tanh(gru_forward)
-        gru_backward_tanh = fluid.layers.tanh(gru_backward)
+        gru_forward_tanh = paddle.tanh(gru_forward)
+        gru_backward_tanh = paddle.tanh(gru_backward)
         encoded_vector = fluid.layers.concat(
             input=[gru_forward_tanh, gru_backward_tanh], axis=2
         )
...
@@ -18,6 +18,7 @@ import numpy as np
 import paddle.fluid as fluid
 import unittest

+import paddle
 from paddle.fluid.dygraph.nn import Embedding
 from paddle.fluid.dygraph import ProgramTranslator
 from paddle.fluid.dygraph import declarative
@@ -260,7 +261,7 @@ class SkipGram(fluid.dygraph.Layer):
         )
         word_sim = fluid.layers.reduce_sum(word_sim, dim=-1)
-        pred = fluid.layers.sigmoid(word_sim)
+        pred = paddle.nn.functional.sigmoid(word_sim)

         loss = fluid.layers.sigmoid_cross_entropy_with_logits(word_sim, label)
         loss = fluid.layers.reduce_mean(loss)
...
@@ -51,7 +51,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
         )
-        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        out = paddle.cumsum(x, **self.attrs)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
@@ -90,7 +90,7 @@ class TestCase4(TestBase):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype="int32"
         )
-        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        out = paddle.cumsum(x, **self.attrs)
         self.fetch_list = [out.name]
@@ -104,7 +104,7 @@ class TestCase5(TestBase):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype="int64"
         )
-        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        out = paddle.cumsum(x, **self.attrs)
         self.fetch_list = [out.name]
...
@@ -45,7 +45,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.gelu(x, **self.attrs)
+        out = paddle.nn.functional.gelu(x, **self.attrs)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
...
@@ -29,7 +29,7 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()

     def set_test_op(self):
-        self.op = paddle.fluid.layers.abs
+        self.op = paddle.abs
         self.op_attrs = {}

     def set_data_feed(self):
@@ -70,55 +70,55 @@ class TestAcos(TestBase):
         self.atol = 1e-6

     def set_test_op(self):
-        self.op = paddle.fluid.layers.acos
+        self.op = paddle.acos
         self.op_attrs = {}


 class TestAsin(TestAcos):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.asin
+        self.op = paddle.asin
         self.op_attrs = {}


 class TestSinh(TestAcos):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.sinh
+        self.op = paddle.sinh
         self.op_attrs = {}


 class TestAtan(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.atan
+        self.op = paddle.atan
         self.op_attrs = {}


 class TestCeil(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.ceil
+        self.op = paddle.ceil
         self.op_attrs = {}


 class TestCos(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.cos
+        self.op = paddle.cos
         self.op_attrs = {}


 class TestCosh(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.cosh
+        self.op = paddle.cosh
         self.op_attrs = {}


 class TestErf(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.erf
+        self.op = paddle.erf
         self.op_attrs = {}


 class TestExp(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.exp
+        self.op = paddle.exp
         self.op_attrs = {}
@@ -128,19 +128,19 @@ class TestFloor(TestBase):
         return False

     def set_test_op(self):
-        self.op = paddle.fluid.layers.floor
+        self.op = paddle.floor
         self.op_attrs = {}


 class TestLog(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.log
+        self.op = paddle.log
         self.op_attrs = {}


 class TestReciprocal(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.reciprocal
+        self.op = paddle.reciprocal
         self.op_attrs = {}
@@ -152,55 +152,55 @@ class TestRelu(TestBase):
 class TestRound(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.round
+        self.op = paddle.round
         self.op_attrs = {}


 class TestSigmoid(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.sigmoid
+        self.op = paddle.nn.functional.sigmoid
         self.op_attrs = {}


 class TestSign(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.sign
+        self.op = paddle.sign
         self.op_attrs = {}


 class TestSin(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.sin
+        self.op = paddle.sin
         self.op_attrs = {}


 class TestSoftplus(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.softplus
+        self.op = paddle.nn.functional.softplus
         self.op_attrs = {}


 class TestSoftsign(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.softsign
+        self.op = paddle.nn.functional.softsign
         self.op_attrs = {}


 class TestSqrt(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.sqrt
+        self.op = paddle.sqrt
         self.op_attrs = {}


 class TestTan(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.tan
+        self.op = paddle.tan
         self.op_attrs = {}


 class TestTanh(TestBase):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.tanh
+        self.op = paddle.tanh
         self.op_attrs = {}
...
@@ -75,7 +75,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Tanh(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.tanh
+        self.act = paddle.tanh


 class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu(
@@ -108,7 +108,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_SQRT(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.sqrt
+        self.act = paddle.sqrt


 class ElementwiseActivationMkldnnFusePassTest_Add_ABS(
@@ -116,7 +116,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_ABS(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.abs
+        self.act = paddle.abs


 class ElementwiseActivationMkldnnFusePassTest_Add_Clip(
@@ -134,7 +134,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.gelu
+        self.act = paddle.nn.functional.gelu


 class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh(
@@ -142,7 +142,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.gelu
+        self.act = paddle.nn.functional.gelu
         self.act_alpha = True
@@ -159,7 +159,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.sigmoid
+        self.act = paddle.nn.functional.sigmoid


 class ElementwiseActivationMkldnnFusePassTest_Sub_Relu(
@@ -175,7 +175,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.tanh
+        self.act = paddle.tanh


 class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu(
@@ -208,7 +208,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_ABS(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.abs
+        self.act = paddle.abs


 class ElementwiseActivationMkldnnFusePassTest_Sub_Clip(
@@ -226,7 +226,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.gelu
+        self.act = paddle.nn.functional.gelu


 class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh(
@@ -234,7 +234,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.gelu
+        self.act = paddle.nn.functional.gelu
         self.act_alpha = True
@@ -251,7 +251,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.sigmoid
+        self.act = paddle.nn.functional.sigmoid


 class ElementwiseActivationMkldnnFusePassTest_Mul_Relu(
@@ -267,7 +267,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.tanh
+        self.act = paddle.tanh


 class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu(
@@ -300,7 +300,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.sqrt
+        self.act = paddle.sqrt


 class ElementwiseActivationMkldnnFusePassTest_Mul_ABS(
@@ -308,7 +308,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_ABS(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.abs
+        self.act = paddle.abs


 class ElementwiseActivationMkldnnFusePassTest_Mul_Clip(
@@ -326,7 +326,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu(
): ):
def set_params(self): def set_params(self):
self.operand = fluid.layers.elementwise_mul self.operand = fluid.layers.elementwise_mul
self.act = fluid.layers.gelu self.act = paddle.nn.functional.gelu
class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh( class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh(
...@@ -334,7 +334,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh( ...@@ -334,7 +334,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh(
): ):
def set_params(self): def set_params(self):
self.operand = fluid.layers.elementwise_mul self.operand = fluid.layers.elementwise_mul
self.act = fluid.layers.gelu self.act = paddle.nn.functional.gelu
self.act_alpha = True self.act_alpha = True
...@@ -351,7 +351,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid( ...@@ -351,7 +351,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid(
): ):
def set_params(self): def set_params(self):
self.operand = fluid.layers.elementwise_mul self.operand = fluid.layers.elementwise_mul
self.act = fluid.layers.sigmoid self.act = paddle.nn.functional.sigmoid
if __name__ == "__main__": if __name__ == "__main__":
......
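
The fuse-pass cases above only swap the `act` attribute; activations with an nn flavour resolve to paddle.nn.functional rather than the top-level namespace. A standalone sketch of the three substitutions used above (the paddle.rand input is a placeholder):

import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 8])
a = F.sigmoid(x)                 # was: fluid.layers.sigmoid(x)
b = F.gelu(x, approximate=True)  # was: fluid.layers.gelu with the tanh approximation
c = F.softsign(x)                # was: fluid.layers.softsign(x)
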
...@@ -17,6 +17,7 @@ import shutil ...@@ -17,6 +17,7 @@ import shutil
import unittest import unittest
import numpy as np import numpy as np
from inference_pass_test import InferencePassTest from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
...@@ -81,7 +82,7 @@ class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest): ...@@ -81,7 +82,7 @@ class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):
class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x): def append_act(self, x):
return fluid.layers.sigmoid(x) return paddle.nn.functional.sigmoid(x)
class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):
...@@ -108,7 +109,7 @@ class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest): ...@@ -108,7 +109,7 @@ class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):
class TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x): def append_act(self, x):
return fluid.layers.tanh(x) return paddle.tanh(x)
class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):
...@@ -303,7 +304,7 @@ class TensorRTSubgraphPassPreluFp16DynamicSerializeTest( ...@@ -303,7 +304,7 @@ class TensorRTSubgraphPassPreluFp16DynamicSerializeTest(
class TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):
...@@ -322,7 +323,7 @@ class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest): ...@@ -322,7 +323,7 @@ class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):
) )
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest): class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):
...@@ -333,7 +334,7 @@ class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest): ...@@ -333,7 +334,7 @@ class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):
) )
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
class TensorRTSubgraphPassGeluFp16SerializeTest( class TensorRTSubgraphPassGeluFp16SerializeTest(
...@@ -346,7 +347,7 @@ class TensorRTSubgraphPassGeluFp16SerializeTest( ...@@ -346,7 +347,7 @@ class TensorRTSubgraphPassGeluFp16SerializeTest(
) )
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicTest( class TensorRTSubgraphPassGeluFp16DynamicTest(
...@@ -367,7 +368,7 @@ class TensorRTSubgraphPassGeluFp16DynamicTest( ...@@ -367,7 +368,7 @@ class TensorRTSubgraphPassGeluFp16DynamicTest(
) )
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicSerializeTest( class TensorRTSubgraphPassGeluFp16DynamicSerializeTest(
...@@ -388,7 +389,7 @@ class TensorRTSubgraphPassGeluFp16DynamicSerializeTest( ...@@ -388,7 +389,7 @@ class TensorRTSubgraphPassGeluFp16DynamicSerializeTest(
) )
def append_act(self, x): def append_act(self, x):
return fluid.layers.gelu(x) return paddle.nn.functional.gelu(x)
if __name__ == "__main__": if __name__ == "__main__":
......
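
All of the TensorRT gelu variants above exercise the same functional call, whose exact (erf-based) form can be checked directly. A standalone numeric sketch, assuming eager mode (not part of the test suite):

import math
import numpy as np
import paddle
import paddle.nn.functional as F

x_np = np.linspace(-3.0, 3.0, 7).astype(np.float32)
out = F.gelu(paddle.to_tensor(x_np), approximate=False)  # erf-based gelu
ref = np.array(
    [0.5 * v * (1.0 + math.erf(v / math.sqrt(2.0))) for v in x_np],
    dtype=np.float32,
)
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-05, atol=1e-06)
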
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
import paddle
from pass_test import PassTest from pass_test import PassTest
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
...@@ -85,10 +86,14 @@ class FusionGroupPassComplicatedTest(FusionGroupPassTest): ...@@ -85,10 +86,14 @@ class FusionGroupPassComplicatedTest(FusionGroupPassTest):
one = layers.fill_constant(shape=[1], dtype=dtype, value=1.0) one = layers.fill_constant(shape=[1], dtype=dtype, value=1.0)
tmp_0 = one * self.feed_vars[0] tmp_0 = one * self.feed_vars[0]
# subgraph with 9 op nodes # subgraph with 9 op nodes
tmp_1 = tmp_0 * layers.sigmoid(self.feed_vars[1]) + layers.sigmoid( tmp_1 = tmp_0 * paddle.nn.functional.sigmoid(
self.feed_vars[2] self.feed_vars[1]
) * layers.tanh(self.feed_vars[3]) ) + paddle.nn.functional.sigmoid(self.feed_vars[2]) * paddle.tanh(
self.feed_vars[3]
)
tmp_2 = layers.tanh(tmp_1) + layers.sigmoid(self.feed_vars[4]) tmp_2 = paddle.tanh(tmp_1) + paddle.nn.functional.sigmoid(
self.feed_vars[4]
)
self.append_gradients(tmp_2) self.append_gradients(tmp_2)
...@@ -162,10 +167,10 @@ class FusionGroupPassSumTest(FusionGroupPassTest): ...@@ -162,10 +167,10 @@ class FusionGroupPassSumTest(FusionGroupPassTest):
tmp_0 = layers.sum( tmp_0 = layers.sum(
[self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]] [self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]]
) )
tmp_1 = layers.sqrt(tmp_0) tmp_1 = paddle.sqrt(tmp_0)
tmp_2 = layers.mul(tmp_0, self.feed_vars[3]) tmp_2 = layers.mul(tmp_0, self.feed_vars[3])
# subgraph with 2 op nodes # subgraph with 2 op nodes
tmp_3 = layers.square(layers.sum([tmp_1, tmp_2])) tmp_3 = paddle.square(layers.sum([tmp_1, tmp_2]))
self.append_gradients(tmp_3) self.append_gradients(tmp_3)
......
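
The fusion-group subgraph above mixes migrated ops (paddle.sqrt, paddle.square) with still-fluid ones (layers.sum, layers.mul). A rough standalone equivalent using only current APIs — paddle.add_n stands in for layers.sum, which this PR does not touch, and the mul step is simplified away:

import paddle

a, b, c = paddle.rand([2, 3]), paddle.rand([2, 3]), paddle.rand([2, 3])
tmp_0 = paddle.add_n([a, b, c])        # layers.sum equivalent
tmp_1 = paddle.sqrt(tmp_0)             # was: layers.sqrt
tmp_3 = paddle.square(tmp_1 + tmp_0)   # was: layers.square(layers.sum([...]))
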
...@@ -97,7 +97,7 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): ...@@ -97,7 +97,7 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase):
) )
if self.bn_dtype == np.float16: if self.bn_dtype == np.float16:
bn = fluid.layers.cast(bn, 'float32') bn = fluid.layers.cast(bn, 'float32')
sigmoid = fluid.layers.sigmoid(bn) sigmoid = paddle.nn.functional.sigmoid(bn)
out = fluid.layers.reduce_sum(sigmoid) out = fluid.layers.reduce_sum(sigmoid)
# if not sync_bn: # if not sync_bn:
# out = out / core.get_mlu_device_count() # out = out / core.get_mlu_device_count()
......
...@@ -109,7 +109,7 @@ class TestGeluNet(unittest.TestCase): ...@@ -109,7 +109,7 @@ class TestGeluNet(unittest.TestCase):
c = paddle.multiply(a, b) c = paddle.multiply(a, b)
fc_1 = fluid.layers.fc(input=c, size=128) fc_1 = fluid.layers.fc(input=c, size=128)
fc_1_gelu = fluid.layers.gelu(fc_1) fc_1_gelu = paddle.nn.functional.gelu(fc_1)
prediction = fluid.layers.fc(input=fc_1_gelu, size=2, act='softmax') prediction = fluid.layers.fc(input=fc_1_gelu, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
...@@ -99,7 +99,7 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): ...@@ -99,7 +99,7 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase):
) )
# if self.dtype == np.float16: # if self.dtype == np.float16:
# bn = fluid.layers.cast(bn, 'float32') # bn = fluid.layers.cast(bn, 'float32')
sigmoid = fluid.layers.sigmoid(bn) sigmoid = paddle.nn.functional.sigmoid(bn)
out = fluid.layers.reduce_sum(sigmoid) out = fluid.layers.reduce_sum(sigmoid)
# if not sync_bn: # if not sync_bn:
# out = out / core.get_npu_device_count() # out = out / core.get_npu_device_count()
......
...@@ -109,7 +109,7 @@ class TestGeluNet(unittest.TestCase): ...@@ -109,7 +109,7 @@ class TestGeluNet(unittest.TestCase):
c = paddle.multiply(a, b) c = paddle.multiply(a, b)
fc_1 = fluid.layers.fc(input=c, size=128) fc_1 = fluid.layers.fc(input=c, size=128)
fc_1_gelu = fluid.layers.gelu(fc_1) fc_1_gelu = paddle.nn.functional.gelu(fc_1)
prediction = fluid.layers.fc(input=fc_1_gelu, size=2, act='softmax') prediction = fluid.layers.fc(input=fc_1_gelu, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
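
The MLU/NPU nets above keep fluid.layers.fc and migrate only the activation. A sketch of the same fc → gelu → softmax stack written entirely with current APIs (sizes are placeholders, not the test's dimensions):

import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 32])
fc_1 = paddle.nn.Linear(32, 128)
fc_2 = paddle.nn.Linear(128, 2)
hidden = F.gelu(fc_1(x))                       # was: fluid.layers.gelu(fc_1)
prediction = F.softmax(fc_2(hidden), axis=-1)  # was: act='softmax' on the fc
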
...@@ -88,7 +88,7 @@ def bow_net( ...@@ -88,7 +88,7 @@ def bow_net(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
) )
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow) bow_tanh = paddle.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
......
...@@ -33,7 +33,7 @@ class TestSigmoidTripleGradCheck(unittest.TestCase): ...@@ -33,7 +33,7 @@ class TestSigmoidTripleGradCheck(unittest.TestCase):
dtype = np.float64 dtype = np.float64
x = layers.data('x', shape, False, dtype=dtype) x = layers.data('x', shape, False, dtype=dtype)
x.persistable = True x.persistable = True
y = layers.sigmoid(x) y = F.sigmoid(x)
x_arr = np.random.random(shape).astype(dtype) x_arr = np.random.random(shape).astype(dtype)
x_arr[np.abs(x_arr) < 0.005] = 0.002 x_arr[np.abs(x_arr) < 0.005] = 0.002
gradient_checker.triple_grad_check( gradient_checker.triple_grad_check(
...@@ -51,7 +51,7 @@ class TestSigmoidTripleGradCheck(unittest.TestCase): ...@@ -51,7 +51,7 @@ class TestSigmoidTripleGradCheck(unittest.TestCase):
class TestSigmoidDoubleGradCheck(unittest.TestCase): class TestSigmoidDoubleGradCheck(unittest.TestCase):
def sigmoid_wrapper(self, x): def sigmoid_wrapper(self, x):
return fluid.layers.sigmoid(x[0]) return F.sigmoid(x[0])
@prog_scope() @prog_scope()
def func(self, place): def func(self, place):
...@@ -60,7 +60,7 @@ class TestSigmoidDoubleGradCheck(unittest.TestCase): ...@@ -60,7 +60,7 @@ class TestSigmoidDoubleGradCheck(unittest.TestCase):
dtype = np.float64 dtype = np.float64
x = layers.data('x', shape, False, dtype=dtype) x = layers.data('x', shape, False, dtype=dtype)
x.persistable = True x.persistable = True
y = layers.sigmoid(x) y = F.sigmoid(x)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
x_arr[np.abs(x_arr) < 0.005] = 0.002 x_arr[np.abs(x_arr) < 0.005] = 0.002
gradient_checker.double_grad_check( gradient_checker.double_grad_check(
...@@ -92,7 +92,7 @@ class TestTanhTripleGradCheck(unittest.TestCase): ...@@ -92,7 +92,7 @@ class TestTanhTripleGradCheck(unittest.TestCase):
dtype = np.float64 dtype = np.float64
x = layers.data('x', shape, False, dtype=dtype) x = layers.data('x', shape, False, dtype=dtype)
x.persistable = True x.persistable = True
y = layers.tanh(x) y = paddle.tanh(x)
x_arr = np.random.random(shape).astype(dtype) x_arr = np.random.random(shape).astype(dtype)
x_arr[np.abs(x_arr) < 0.005] = 0.002 x_arr[np.abs(x_arr) < 0.005] = 0.002
gradient_checker.triple_grad_check( gradient_checker.triple_grad_check(
...@@ -322,7 +322,7 @@ class TestSqrtDoubleGradCheck(unittest.TestCase): ...@@ -322,7 +322,7 @@ class TestSqrtDoubleGradCheck(unittest.TestCase):
x = layers.data('x', shape, False, dtype) x = layers.data('x', shape, False, dtype)
x.persistable = True x.persistable = True
y = layers.sqrt(x) y = paddle.sqrt(x)
x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)
gradient_checker.double_grad_check( gradient_checker.double_grad_check(
...@@ -354,7 +354,7 @@ class TestRsqrtDoubleGradCheck(unittest.TestCase): ...@@ -354,7 +354,7 @@ class TestRsqrtDoubleGradCheck(unittest.TestCase):
x = layers.data('x', shape, False, dtype) x = layers.data('x', shape, False, dtype)
x.persistable = True x.persistable = True
y = layers.rsqrt(x) y = paddle.rsqrt(x)
x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)
gradient_checker.double_grad_check( gradient_checker.double_grad_check(
...@@ -386,7 +386,7 @@ class TestSquareDoubleGradCheck(unittest.TestCase): ...@@ -386,7 +386,7 @@ class TestSquareDoubleGradCheck(unittest.TestCase):
x = layers.data('x', shape, False, dtype) x = layers.data('x', shape, False, dtype)
x.persistable = True x.persistable = True
y = layers.square(x) y = paddle.square(x)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.double_grad_check( gradient_checker.double_grad_check(
...@@ -417,7 +417,7 @@ class TestAbsDoubleGradCheck(unittest.TestCase): ...@@ -417,7 +417,7 @@ class TestAbsDoubleGradCheck(unittest.TestCase):
x = layers.data('x', shape, False, dtype) x = layers.data('x', shape, False, dtype)
x.persistable = True x.persistable = True
y = layers.abs(x) y = paddle.abs(x)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
# Because we set delta = 0.005 in calculating numeric gradient, # Because we set delta = 0.005 in calculating numeric gradient,
# if x is too small, the numeric gradient is inaccurate. # if x is too small, the numeric gradient is inaccurate.
...@@ -449,7 +449,7 @@ class TestLogDoubleGradCheck(unittest.TestCase): ...@@ -449,7 +449,7 @@ class TestLogDoubleGradCheck(unittest.TestCase):
x = layers.data('x', shape, False, dtype) x = layers.data('x', shape, False, dtype)
x.persistable = True x.persistable = True
y = layers.log(x) y = paddle.log(x)
x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)
...@@ -608,7 +608,7 @@ class TestSinTripleGradCheck(unittest.TestCase): ...@@ -608,7 +608,7 @@ class TestSinTripleGradCheck(unittest.TestCase):
dtype = np.float64 dtype = np.float64
x = layers.data('x', shape, False, dtype=dtype) x = layers.data('x', shape, False, dtype=dtype)
x.persistable = True x.persistable = True
y = layers.sin(x) y = paddle.sin(x)
x_arr = np.random.random(shape).astype(dtype) x_arr = np.random.random(shape).astype(dtype)
x_arr[np.abs(x_arr) < 0.005] = 0.002 x_arr[np.abs(x_arr) < 0.005] = 0.002
gradient_checker.triple_grad_check( gradient_checker.triple_grad_check(
...@@ -733,7 +733,7 @@ class TestCosTripleGradCheck(unittest.TestCase): ...@@ -733,7 +733,7 @@ class TestCosTripleGradCheck(unittest.TestCase):
dtype = np.float64 dtype = np.float64
x = layers.data('x', shape, False, dtype=dtype) x = layers.data('x', shape, False, dtype=dtype)
x.persistable = True x.persistable = True
y = layers.cos(x) y = paddle.cos(x)
x_arr = np.random.random(shape).astype(dtype) x_arr = np.random.random(shape).astype(dtype)
x_arr[np.abs(x_arr) < 0.005] = 0.002 x_arr[np.abs(x_arr) < 0.005] = 0.002
gradient_checker.triple_grad_check( gradient_checker.triple_grad_check(
......
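
The double/triple grad checks above compare finite differences against analytic higher-order gradients of the migrated ops. The same property can be spot-checked in eager mode with paddle.grad; a minimal sketch for paddle.sqrt, where the second derivative of sqrt(x) is -0.25 * x**-1.5 (illustration only, not the gradient_checker utility):

import numpy as np
import paddle

x = paddle.to_tensor(np.array([0.5, 1.0, 2.0]), stop_gradient=False)
y = paddle.sqrt(x)
(g1,) = paddle.grad(y, x, create_graph=True)  # 0.5 * x**-0.5
(g2,) = paddle.grad(g1, x)                    # -0.25 * x**-1.5
np.testing.assert_allclose(g2.numpy(), -0.25 * x.numpy() ** -1.5, rtol=1e-06)
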
...@@ -33,17 +33,17 @@ class TestSqrtOpError(unittest.TestCase): ...@@ -33,17 +33,17 @@ class TestSqrtOpError(unittest.TestCase):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
# The input type of sqrt op must be Variable or numpy.ndarray. # The input type of sqrt op must be Variable or numpy.ndarray.
in1 = 1 in1 = 1
self.assertRaises(TypeError, fluid.layers.sqrt, in1) self.assertRaises(TypeError, paddle.sqrt, in1)
# The input dtype of sqrt op must be float16, float32, float64. # The input dtype of sqrt op must be float16, float32, float64.
in2 = fluid.layers.data( in2 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32" name='input2', shape=[12, 10], dtype="int32"
) )
self.assertRaises(TypeError, fluid.layers.sqrt, in2) self.assertRaises(TypeError, paddle.sqrt, in2)
in3 = fluid.layers.data( in3 = fluid.layers.data(
name='input3', shape=[12, 10], dtype="float16" name='input3', shape=[12, 10], dtype="float16"
) )
fluid.layers.sqrt(x=in3) paddle.sqrt(x=in3)
class TestActivation(OpTest): class TestActivation(OpTest):
...@@ -390,16 +390,6 @@ class TestLogSigmoidAPI(unittest.TestCase): ...@@ -390,16 +390,6 @@ class TestLogSigmoidAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [11, 17])
out = paddle.fluid.layers.logsigmoid(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -488,16 +478,6 @@ class TestTanhAPI(unittest.TestCase): ...@@ -488,16 +478,6 @@ class TestTanhAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', [10, 12], self.dtype)
out = fluid.layers.tanh(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = np.tanh(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -593,7 +573,7 @@ class TestSinhAPI(unittest.TestCase): ...@@ -593,7 +573,7 @@ class TestSinhAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
np_x = np.array([0.1]) np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
z = fluid.layers.sinh(x).numpy() z = paddle.sinh(x).numpy()
z_expected = np.sinh(np_x) z_expected = np.sinh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05) np.testing.assert_allclose(z, z_expected, rtol=1e-05)
...@@ -610,7 +590,7 @@ class TestSinhAPI(unittest.TestCase): ...@@ -610,7 +590,7 @@ class TestSinhAPI(unittest.TestCase):
dtype="float32", dtype="float32",
) )
pd_sinh_out = fluid.layers.sinh(data_x) pd_sinh_out = paddle.sinh(data_x)
exe = fluid.Executor(place=fluid.CPUPlace()) exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
(np_sinh_res,) = exe.run( (np_sinh_res,) = exe.run(
...@@ -630,7 +610,7 @@ class TestSinhAPI(unittest.TestCase): ...@@ -630,7 +610,7 @@ class TestSinhAPI(unittest.TestCase):
) )
var = fluid.dygraph.to_variable(input_x) var = fluid.dygraph.to_variable(input_x)
var.stop_gradient = False var.stop_gradient = False
loss = fluid.layers.sinh(var) loss = paddle.sinh(var)
loss.backward() loss.backward()
grad_var = var.gradient() grad_var = var.gradient()
self.assertEqual(grad_var.shape, input_x.shape) self.assertEqual(grad_var.shape, input_x.shape)
...@@ -640,13 +620,13 @@ class TestSinhOpError(unittest.TestCase): ...@@ -640,13 +620,13 @@ class TestSinhOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program()): with program_guard(Program()):
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, fluid.layers.sinh, 1) self.assertRaises(TypeError, paddle.sinh, 1)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32') x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, fluid.layers.sinh, x_int32) self.assertRaises(TypeError, paddle.sinh, x_int32)
# support the input dtype is float16 # support the input dtype is float16
x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
fluid.layers.sinh(x_fp16) paddle.sinh(x_fp16)
class TestCosh(TestActivation): class TestCosh(TestActivation):
...@@ -678,7 +658,7 @@ class TestCoshAPI(unittest.TestCase): ...@@ -678,7 +658,7 @@ class TestCoshAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
np_x = np.array([0.1]) np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
z = fluid.layers.cosh(x).numpy() z = paddle.cosh(x).numpy()
z_expected = np.cosh(np_x) z_expected = np.cosh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05) np.testing.assert_allclose(z, z_expected, rtol=1e-05)
...@@ -715,7 +695,7 @@ class TestCoshAPI(unittest.TestCase): ...@@ -715,7 +695,7 @@ class TestCoshAPI(unittest.TestCase):
) )
var = fluid.dygraph.to_variable(input_x) var = fluid.dygraph.to_variable(input_x)
var.stop_gradient = False var.stop_gradient = False
loss = fluid.layers.cosh(var) loss = paddle.cosh(var)
loss.backward() loss.backward()
grad_var = var.gradient() grad_var = var.gradient()
self.assertEqual(grad_var.shape, input_x.shape) self.assertEqual(grad_var.shape, input_x.shape)
...@@ -725,13 +705,13 @@ class TestCoshOpError(unittest.TestCase): ...@@ -725,13 +705,13 @@ class TestCoshOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program()): with program_guard(Program()):
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, fluid.layers.cosh, 1) self.assertRaises(TypeError, paddle.cosh, 1)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32') x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, fluid.layers.cosh, x_int32) self.assertRaises(TypeError, paddle.cosh, x_int32)
# support the input dtype is float16 # support the input dtype is float16
x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
fluid.layers.cosh(x_fp16) paddle.cosh(x_fp16)
def ref_tanhshrink(x): def ref_tanhshrink(x):
...@@ -798,16 +778,6 @@ class TestTanhshrinkAPI(unittest.TestCase): ...@@ -798,16 +778,6 @@ class TestTanhshrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.tanh_shrink(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_tanhshrink(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -914,16 +884,6 @@ class TestHardShrinkAPI(unittest.TestCase): ...@@ -914,16 +884,6 @@ class TestHardShrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', [10, 12])
out = fluid.layers.hard_shrink(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_hardshrink(self.x_np, 0.5)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -1080,16 +1040,6 @@ class TestSoftshrinkAPI(unittest.TestCase): ...@@ -1080,16 +1040,6 @@ class TestSoftshrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.softshrink(x, self.threshold)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_softshrink(self.x_np, self.threshold)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -1780,16 +1730,6 @@ class TestLeakyReluAPI(unittest.TestCase): ...@@ -1780,16 +1730,6 @@ class TestLeakyReluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', [10, 12])
out = fluid.layers.leaky_relu(x, 0.01)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_leaky_relu(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -3120,16 +3060,6 @@ class TestSoftplusAPI(unittest.TestCase): ...@@ -3120,16 +3060,6 @@ class TestSoftplusAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.softplus(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_softplus(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -3215,16 +3145,6 @@ class TestSoftsignAPI(unittest.TestCase): ...@@ -3215,16 +3145,6 @@ class TestSoftsignAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.softsign(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_softsign(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -3314,16 +3234,6 @@ class TestThresholdedReluAPI(unittest.TestCase): ...@@ -3314,16 +3234,6 @@ class TestThresholdedReluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.thresholded_relu(x, self.threshold)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_thresholded_relu(self.x_np, self.threshold)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self): def test_errors(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
...@@ -3660,45 +3570,6 @@ class TestMishAPI(unittest.TestCase): ...@@ -3660,45 +3570,6 @@ class TestMishAPI(unittest.TestCase):
F.mish(x_fp16) F.mish(x_fp16)
# ------------------ Test Error Activation----------------------
def create_test_error_class(op_type):
class TestOpErrors(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
op = getattr(fluid.layers, op_type)
# The input dtype of op_type must be float32, float64.
in1 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32"
)
in2 = fluid.layers.data(
name='input3', shape=[12, 10], dtype="int64"
)
self.assertRaises(TypeError, op, in1)
self.assertRaises(TypeError, op, in2)
cls_name = "{0}_{1}".format(op_type, "test_errors")
TestOpErrors.__name__ = cls_name
globals()[cls_name] = TestOpErrors
create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')
create_test_error_class('tan')
create_test_error_class('acosh')
create_test_error_class('asinh')
create_test_error_class('atanh')
# ------------------ Test Cudnn Activation---------------------- # ------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3): def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
@unittest.skipIf( @unittest.skipIf(
......
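
The deleted block above used the build-a-class-and-register-it idiom that the remaining cudnn factory still relies on. A self-contained sketch of that idiom, here checking only that each migrated op exists on the paddle namespace (a hypothetical test, not one from the suite):

import unittest
import paddle

def create_exists_test(op_name):
    class TestOpExists(unittest.TestCase):
        def test_exists(self):
            self.assertTrue(hasattr(paddle, op_name))

    cls_name = "{0}_{1}".format(op_name, "test_exists")
    TestOpExists.__name__ = cls_name
    globals()[cls_name] = TestOpExists

for name in ('sqrt', 'tanh', 'sin', 'cos', 'rsqrt'):
    create_exists_test(name)
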
...@@ -371,7 +371,7 @@ class BadInputTest(unittest.TestCase): ...@@ -371,7 +371,7 @@ class BadInputTest(unittest.TestCase):
def test_bad_x(): def test_bad_x():
data = [1, 2, 4] data = [1, 2, 4]
result = fluid.layers.cumsum(data, axis=0) result = paddle.cumsum(data, axis=0)
self.assertRaises(TypeError, test_bad_x) self.assertRaises(TypeError, test_bad_x)
......
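
As the test above asserts, paddle.cumsum — like the fluid op it replaces — rejects a raw Python list; the supported call converts to a Tensor first. A one-line sketch of valid usage:

import paddle

x = paddle.to_tensor([1, 2, 4])
print(paddle.cumsum(x, axis=0).numpy())  # [1 3 7]
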
...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
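
This vsum → softsign block recurs with identical edits across the next several PS-pass tests. A standalone sketch of the migrated step — paddle.sum over a dense stand-in replaces the fluid sequence_pool, which is out of scope for this PR:

import paddle
import paddle.nn.functional as F

q_emb = paddle.rand([8, 128])      # stand-in for the reshaped embedding
q_sum = paddle.sum(q_emb, axis=0)  # "vsum" over the sequence dimension
q_ss = F.softsign(q_sum)           # was: fluid.layers.softsign(q_sum)
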
...@@ -83,7 +83,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -83,7 +83,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -111,7 +111,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -111,7 +111,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -138,7 +138,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -138,7 +138,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -86,7 +86,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -86,7 +86,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -114,7 +114,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -114,7 +114,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -89,7 +89,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -89,7 +89,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -148,7 +148,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -148,7 +148,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -88,7 +88,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -88,7 +88,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
q_ss = fluid.layers.data_norm(input=q_ss) q_ss = fluid.layers.data_norm(input=q_ss)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -148,7 +148,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -148,7 +148,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -115,7 +115,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -115,7 +115,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -144,7 +144,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -144,7 +144,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -87,7 +87,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -150,7 +150,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase):
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv # fc layer after conv
q_fc = fluid.layers.fc( q_fc = fluid.layers.fc(
input=q_ss, input=q_ss,
...@@ -115,7 +115,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -115,7 +115,7 @@ class TestPSPassWithBow(unittest.TestCase):
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer # fc layer
pt_fc = fluid.layers.fc( pt_fc = fluid.layers.fc(
input=pt_ss, input=pt_ss,
...@@ -144,7 +144,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -144,7 +144,7 @@ class TestPSPassWithBow(unittest.TestCase):
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer # fc layer
nt_fc = fluid.layers.fc( nt_fc = fluid.layers.fc(
input=nt_ss, input=nt_ss,
......
...@@ -38,7 +38,7 @@ def gru_net( ...@@ -38,7 +38,7 @@ def gru_net(
fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3) fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)
gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False) gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)
gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max') gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')
gru_max_tanh = fluid.layers.tanh(gru_max) gru_max_tanh = paddle.tanh(gru_max)
fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh') fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
...@@ -40,7 +40,7 @@ def lstm_net( ...@@ -40,7 +40,7 @@ def lstm_net(
input=fc0, size=hid_dim * 4, is_reverse=False input=fc0, size=hid_dim * 4, is_reverse=False
) )
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max) lstm_max_tanh = paddle.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
...@@ -191,10 +191,10 @@ def lm_model( ...@@ -191,10 +191,10 @@ def lm_model(
ends=[hidden_size * 4], ends=[hidden_size * 4],
) )
c = pre_cell * layers.sigmoid(f) + layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = layers.tanh(c) * layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
rnn.update_memory(pre_hidden, m) rnn.update_memory(pre_hidden, m)
rnn.update_memory(pre_cell, c) rnn.update_memory(pre_cell, c)
...@@ -299,10 +299,10 @@ def lm_model( ...@@ -299,10 +299,10 @@ def lm_model(
gate_input = layers.elementwise_add(gate_input, bias) gate_input = layers.elementwise_add(gate_input, bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
c = pre_cell * layers.sigmoid(f) + layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = layers.tanh(c) * layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
hidden_array[k] = m hidden_array[k] = m
cell_array[k] = c cell_array[k] = c
......
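
The rewritten LSTM cell above is the standard gate arithmetic: c = sigmoid(f) * pre_cell + sigmoid(i) * tanh(j), m = sigmoid(o) * tanh(c). A standalone sketch with the new calls, using random stand-ins for the fused gate projection:

import paddle
import paddle.nn.functional as F

hidden_size = 16
pre_cell = paddle.rand([4, hidden_size])
gate_input = paddle.rand([4, 4 * hidden_size])
# split the fused projection into the i, j, f, o gates used above
i, j, f, o = paddle.split(gate_input, num_or_sections=4, axis=-1)
c = pre_cell * F.sigmoid(f) + F.sigmoid(i) * paddle.tanh(j)
m = paddle.tanh(c) * F.sigmoid(o)
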
...@@ -327,7 +327,9 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1): ...@@ -327,7 +327,9 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1):
bias_attr=False, bias_attr=False,
) )
h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r)) h = paddle.nn.functional.sigmoid(
x=layers.elementwise_add(x=temp_l, y=temp_r)
)
rnn.update_memory(h_pre, h) rnn.update_memory(h_pre, h)
rnn.output(h) rnn.output(h)
......
...@@ -48,7 +48,7 @@ class TestErfLayer(unittest.TestCase): ...@@ -48,7 +48,7 @@ class TestErfLayer(unittest.TestCase):
y_ref = erf(x) y_ref = erf(x)
with dg.guard(place) as g: with dg.guard(place) as g:
x_var = dg.to_variable(x) x_var = dg.to_variable(x)
y_var = fluid.layers.erf(x_var) y_var = paddle.erf(x_var)
y_test = y_var.numpy() y_test = y_var.numpy()
np.testing.assert_allclose(y_ref, y_test, rtol=1e-05) np.testing.assert_allclose(y_ref, y_test, rtol=1e-05)
......
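
paddle.erf can be validated against the scalar math.erf without a scipy dependency; a minimal eager-mode sketch:

import math
import numpy as np
import paddle

x_np = np.random.uniform(-1.0, 1.0, [5]).astype(np.float64)
y = paddle.erf(paddle.to_tensor(x_np))
ref = np.array([math.erf(v) for v in x_np])
np.testing.assert_allclose(y.numpy(), ref, rtol=1e-06)
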
...@@ -45,7 +45,7 @@ class TestGeluOp(unittest.TestCase): ...@@ -45,7 +45,7 @@ class TestGeluOp(unittest.TestCase):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place) as g: with dg.guard(place) as g:
x_var = dg.to_variable(x) x_var = dg.to_variable(x)
y_var = fluid.layers.gelu(x_var, approximate) y_var = F.gelu(x_var, approximate)
y_test = y_var.numpy() y_test = y_var.numpy()
np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08) np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)
...@@ -56,7 +56,7 @@ class TestGeluOp(unittest.TestCase): ...@@ -56,7 +56,7 @@ class TestGeluOp(unittest.TestCase):
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
with dg.guard(place) as g: with dg.guard(place) as g:
x_var = dg.to_variable(x) x_var = dg.to_variable(x)
y_var = fluid.layers.gelu(x_var, approximate) y_var = F.gelu(x_var, approximate)
y_test = y_var.numpy() y_test = y_var.numpy()
np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08) np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)
......
...@@ -35,7 +35,7 @@ def bow_net( ...@@ -35,7 +35,7 @@ def bow_net(
input=data, is_sparse=True, size=[dict_dim, emb_dim] input=data, is_sparse=True, size=[dict_dim, emb_dim]
) )
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow) bow_tanh = paddle.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
......
...@@ -914,7 +914,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -914,7 +914,7 @@ class TestDygraphUtils(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a, act="sigmoid", use_mkldnn=True, use_cudnn=True) res1 = func(a, act="sigmoid", use_mkldnn=True, use_cudnn=True)
res2 = fluid.layers.sigmoid(a) res2 = paddle.nn.functional.sigmoid(a)
np.testing.assert_allclose(res1.numpy(), res2.numpy(), rtol=1e-05) np.testing.assert_allclose(res1.numpy(), res2.numpy(), rtol=1e-05)
def test_append_activation_in_dygraph2(self): def test_append_activation_in_dygraph2(self):
...@@ -929,7 +929,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -929,7 +929,7 @@ class TestDygraphUtils(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a, act="sigmoid", use_cudnn=True) res1 = func(a, act="sigmoid", use_cudnn=True)
res2 = fluid.layers.sigmoid(a) res2 = paddle.nn.functional.sigmoid(a)
np.testing.assert_array_equal(res1.numpy(), res2.numpy()) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph3(self): def test_append_activation_in_dygraph3(self):
......
...@@ -317,7 +317,7 @@ class SimpleAttention(fluid.dygraph.Layer): ...@@ -317,7 +317,7 @@ class SimpleAttention(fluid.dygraph.Layer):
concated = fluid.layers.elementwise_add( concated = fluid.layers.elementwise_add(
encoder_proj, decoder_state_expand encoder_proj, decoder_state_expand
) )
concated = fluid.layers.tanh(x=concated) concated = paddle.tanh(x=concated)
attention_weight = self.fc_2(concated) attention_weight = self.fc_2(concated)
weights_reshape = fluid.layers.reshape( weights_reshape = fluid.layers.reshape(
......
...@@ -115,10 +115,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -115,10 +115,10 @@ class SimpleLSTMRNN(fluid.Layer):
i, j, f, o = fluid.layers.split( i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1 gate_input, num_or_sections=4, dim=-1
) )
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * fluid.layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
self.hidden_array[k] = m self.hidden_array[k] = m
self.cell_array[k] = c self.cell_array[k] = c
self._input = m self._input = m
......
...@@ -110,10 +110,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -110,10 +110,10 @@ class SimpleLSTMRNN(fluid.Layer):
i, j, f, o = fluid.layers.split( i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1 gate_input, num_or_sections=4, dim=-1
) )
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * fluid.layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
self.hidden_array[k] = m self.hidden_array[k] = m
self.cell_array[k] = c self.cell_array[k] = c
self._input = m self._input = m
......
...@@ -112,10 +112,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -112,10 +112,10 @@ class SimpleLSTMRNN(fluid.Layer):
i, j, f, o = fluid.layers.split( i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1 gate_input, num_or_sections=4, dim=-1
) )
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * fluid.layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
self.hidden_array[k] = m self.hidden_array[k] = m
self.cell_array[k] = c self.cell_array[k] = c
self._input = m self._input = m
......
...@@ -322,7 +322,7 @@ class Generator(fluid.dygraph.Layer): ...@@ -322,7 +322,7 @@ class Generator(fluid.dygraph.Layer):
res_block = self._res_block(conv0) res_block = self._res_block(conv0)
deconv = self._deconv(res_block) deconv = self._deconv(res_block)
conv1 = self._conv1(deconv) conv1 = self._conv1(deconv)
out = fluid.layers.tanh(conv1) out = paddle.tanh(conv1)
return out return out
...@@ -437,11 +437,9 @@ def gradient_penalty(f, real, fake, no_grad_set, cfg): ...@@ -437,11 +437,9 @@ def gradient_penalty(f, real, fake, no_grad_set, cfg):
) )
epsilon = 1e-16 epsilon = 1e-16
norm = fluid.layers.sqrt( norm = paddle.sqrt(paddle.sum(paddle.square(gradient), axis=1) + epsilon)
fluid.layers.reduce_sum(fluid.layers.square(gradient), dim=1) + epsilon
)
gp = fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0)) gp = paddle.mean(paddle.square(norm - 1.0))
return gp return gp
...@@ -451,7 +449,7 @@ def get_generator_loss( ...@@ -451,7 +449,7 @@ def get_generator_loss(
fake_img = generator(image_real, label_trg) fake_img = generator(image_real, label_trg)
rec_img = generator(fake_img, label_org) rec_img = generator(fake_img, label_org)
g_loss_rec = fluid.layers.reduce_mean( g_loss_rec = fluid.layers.reduce_mean(
fluid.layers.abs(fluid.layers.elementwise_sub(image_real, rec_img)) paddle.abs(paddle.subtract(image_real, rec_img))
) )
pred_fake, cls_fake = discriminator(fake_img) pred_fake, cls_fake = discriminator(fake_img)
......
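The two rewritten losses above collapse nested `fluid.layers` calls into single paddle 2.x expressions; a self-contained sketch of the same math (function names here are illustrative, not the PR's):

    import paddle

    def gradient_norm_penalty(gradient, epsilon=1e-16):
        # Row-wise L2 norm, then mean squared distance from 1 (WGAN-GP style).
        norm = paddle.sqrt(paddle.sum(paddle.square(gradient), axis=1) + epsilon)
        return paddle.mean(paddle.square(norm - 1.0))

    def l1_reconstruction(real, rec):
        # L1 reconstruction term, as in get_generator_loss.
        return paddle.mean(paddle.abs(paddle.subtract(real, rec)))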
...@@ -182,7 +182,7 @@ class TestDygraphTripleGrad(TestCase): ...@@ -182,7 +182,7 @@ class TestDygraphTripleGrad(TestCase):
numel = z_np.size numel = z_np.size
z.stop_gradient = False z.stop_gradient = False
out = fluid.layers.sigmoid(paddle.matmul(x, y) + z) out = paddle.nn.functional.sigmoid(paddle.matmul(x, y) + z)
out_np = out.numpy() out_np = out.numpy()
(dx_actual,) = self.grad([out], [x], create_graph=True) (dx_actual,) = self.grad([out], [x], create_graph=True)
...@@ -278,7 +278,7 @@ class TestDygraphTripleGradBradcastCase(TestCase): ...@@ -278,7 +278,7 @@ class TestDygraphTripleGradBradcastCase(TestCase):
numel = z_np.size numel = z_np.size
z.stop_gradient = False z.stop_gradient = False
out = fluid.layers.sigmoid(paddle.matmul(x, y) + z) out = paddle.nn.functional.sigmoid(paddle.matmul(x, y) + z)
out_np = out.numpy() out_np = out.numpy()
(dx_actual,) = self.grad([out], [x], create_graph=True) (dx_actual,) = self.grad([out], [x], create_graph=True)
......
...@@ -87,7 +87,7 @@ class TestInplaceANBOpTraining(unittest.TestCase): ...@@ -87,7 +87,7 @@ class TestInplaceANBOpTraining(unittest.TestCase):
# a new Variable for fetch # a new Variable for fetch
bn = bn * 1.0 bn = bn * 1.0
sigmoid = fluid.layers.sigmoid(bn) sigmoid = paddle.nn.functional.sigmoid(bn)
out = fluid.layers.reduce_sum(sigmoid) out = fluid.layers.reduce_sum(sigmoid)
if not only_forward: if not only_forward:
sgd_opt = fluid.optimizer.SGD(learning_rate=0.0) sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)
......
...@@ -41,7 +41,7 @@ def lstm_net( ...@@ -41,7 +41,7 @@ def lstm_net(
input=fc0, size=hid_dim * 4, is_reverse=False input=fc0, size=hid_dim * 4, is_reverse=False
) )
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max) lstm_max_tanh = paddle.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
...@@ -3680,126 +3680,6 @@ class TestBook(LayerTest): ...@@ -3680,126 +3680,6 @@ class TestBook(LayerTest):
) )
return out return out
def make_sigmoid(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.sigmoid(input, name='sigmoid')
return out
def make_exp(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.exp(input, name='exp')
return out
def make_tanh(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.tanh(input, name='tanh')
return out
def make_tanh_shrink(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.tanh_shrink(input, name='tanh_shrink')
return out
def make_sqrt(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.sqrt(input, name='sqrt')
return out
def make_abs(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.abs(input, name='abs')
return out
def make_ceil(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.ceil(input, name='ceil')
return out
def make_floor(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.floor(input, name='floor')
return out
def make_cos(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.cos(input, name='cos')
return out
def make_sin(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.sin(input, name='sin')
return out
def make_round(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.round(input, name='round')
return out
def make_reciprocal(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.reciprocal(input, name='reciprocal')
return out
def make_square(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.square(input, name='square')
return out
def make_softplus(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.softplus(input, name='softplus')
return out
def make_softsign(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.softsign(input, name='softsign')
return out
def make_mish(self): def make_mish(self):
with program_guard( with program_guard(
fluid.default_main_program(), fluid.default_startup_program() fluid.default_main_program(), fluid.default_startup_program()
...@@ -3920,14 +3800,6 @@ class TestBook(LayerTest): ...@@ -3920,14 +3800,6 @@ class TestBook(LayerTest):
out = layers.scale(input, scale=scale_var) out = layers.scale(input, scale=scale_var)
return out return out
def make_softshrink(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.softshrink(input, alpha=0.3)
return out
def make_iou_similarity(self): def make_iou_similarity(self):
with program_guard( with program_guard(
fluid.default_main_program(), fluid.default_startup_program() fluid.default_main_program(), fluid.default_startup_program()
......
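The deleted `make_*` cases covered the activation wrappers that this PR removes from `fluid.layers`. Their paddle 2.x counterparts, as a sketch (note the assumption that `F.softshrink` names its parameter `threshold`, not `alpha`):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([16])
    outs = [
        F.sigmoid(x), paddle.exp(x), paddle.tanh(x), F.tanhshrink(x),
        paddle.sqrt(x), paddle.abs(x), paddle.ceil(x), paddle.floor(x),
        paddle.cos(x), paddle.sin(x), paddle.round(x), paddle.reciprocal(x),
        paddle.square(x), F.softplus(x), F.softsign(x),
        F.softshrink(x, threshold=0.3),
    ]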
...@@ -63,7 +63,7 @@ class TestLgammaOpApi(unittest.TestCase): ...@@ -63,7 +63,7 @@ class TestLgammaOpApi(unittest.TestCase):
shape = (1, 4) shape = (1, 4)
data = np.random.random(shape).astype(self.dtype) + 1 data = np.random.random(shape).astype(self.dtype) + 1
data_ = paddle.to_tensor(data) data_ = paddle.to_tensor(data)
out = paddle.fluid.layers.lgamma(data_) out = paddle.lgamma(data_)
result = special.gammaln(data) result = special.gammaln(data)
np.testing.assert_allclose(result, out.numpy(), rtol=1e-05) np.testing.assert_allclose(result, out.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
......
...@@ -50,7 +50,7 @@ def lstm_net(use_feed): ...@@ -50,7 +50,7 @@ def lstm_net(use_feed):
input=fc0, size=hid_dim * 4, is_reverse=False input=fc0, size=hid_dim * 4, is_reverse=False
) )
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max) lstm_max_tanh = paddle.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label) cost = fluid.layers.cross_entropy(input=prediction, label=label)
......
...@@ -81,7 +81,7 @@ def simple_fc_net(img, label, use_py_func_op): ...@@ -81,7 +81,7 @@ def simple_fc_net(img, label, use_py_func_op):
), ),
) )
if not use_py_func_op: if not use_py_func_op:
hidden = fluid.layers.tanh(hidden) hidden = paddle.tanh(hidden)
else: else:
new_hidden = ( new_hidden = (
fluid.default_main_program() fluid.default_main_program()
......
...@@ -316,7 +316,9 @@ class RecurrentOpTest2(RecurrentOpTest1): ...@@ -316,7 +316,9 @@ class RecurrentOpTest2(RecurrentOpTest1):
bias_attr=False, bias_attr=False,
) )
h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r)) h = paddle.nn.functional.sigmoid(
x=layers.elementwise_add(x=temp_l, y=temp_r)
)
rnn.update_memory(h_pre, h) rnn.update_memory(h_pre, h)
rnn.output(h) rnn.output(h)
...@@ -710,7 +712,9 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1): ...@@ -710,7 +712,9 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1):
bias_attr=False, bias_attr=False,
) )
h = layers.sigmoid(x=layers.elementwise_add(temp_l, temp_r)) h = paddle.nn.functional.sigmoid(
x=layers.elementwise_add(temp_l, temp_r)
)
rnn.update_memory(h_pre, h) rnn.update_memory(h_pre, h)
rnn.output(h) rnn.output(h)
......
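These hunks migrate only the sigmoid call and keep `layers.elementwise_add`; a fully paddle-2.x form of the same step would also swap in `paddle.add`, roughly:

    import paddle

    temp_l = paddle.rand([2, 3])
    temp_r = paddle.rand([2, 3])
    h = paddle.nn.functional.sigmoid(paddle.add(temp_l, temp_r))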
...@@ -135,7 +135,7 @@ def bow_net( ...@@ -135,7 +135,7 @@ def bow_net(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
) )
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow) bow_tanh = paddle.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
...@@ -225,7 +225,7 @@ class TestRegularizer(unittest.TestCase): ...@@ -225,7 +225,7 @@ class TestRegularizer(unittest.TestCase):
param_list = fluid.default_main_program().block(0).all_parameters() param_list = fluid.default_main_program().block(0).all_parameters()
para_sum = [] para_sum = []
for para in param_list: for para in param_list:
para_mul = fluid.layers.square(x=para) para_mul = paddle.square(x=para)
para_sum.append(fluid.layers.reduce_sum(input=para_mul)) para_sum.append(fluid.layers.reduce_sum(input=para_mul))
avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5 avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5
......
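The regularizer tests build the reference L2 penalty by hand; the migrated pattern amounts to the following sketch (parameter tensors here are random stand-ins):

    import paddle

    params = [paddle.rand([4, 4]) for _ in range(3)]
    # Reference L2 penalty as in the tests: 0.5 * sum over params of sum(p**2).
    l2_penalty = 0.5 * paddle.add_n(
        [paddle.sum(paddle.square(p)) for p in params])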
...@@ -41,7 +41,7 @@ def bow_net( ...@@ -41,7 +41,7 @@ def bow_net(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
) )
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow) bow_tanh = paddle.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
...@@ -133,7 +133,7 @@ class TestRegularizer(unittest.TestCase): ...@@ -133,7 +133,7 @@ class TestRegularizer(unittest.TestCase):
param_list = fluid.default_main_program().block(0).all_parameters() param_list = fluid.default_main_program().block(0).all_parameters()
para_sum = [] para_sum = []
for para in param_list: for para in param_list:
para_mul = fluid.layers.square(x=para) para_mul = paddle.square(x=para)
para_sum.append(fluid.layers.reduce_sum(input=para_mul)) para_sum.append(fluid.layers.reduce_sum(input=para_mul))
avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5 avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5
......
...@@ -30,7 +30,7 @@ class Generator(fluid.dygraph.Layer): ...@@ -30,7 +30,7 @@ class Generator(fluid.dygraph.Layer):
def forward(self, x): def forward(self, x):
x = self.conv1(x) x = self.conv1(x)
x = fluid.layers.tanh(x) x = paddle.tanh(x)
return x return x
......
...@@ -122,10 +122,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -122,10 +122,10 @@ class SimpleLSTMRNN(fluid.Layer):
i, j, f, o = fluid.layers.split( i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1 gate_input, num_or_sections=4, dim=-1
) )
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( c = pre_cell * paddle.nn.functional.sigmoid(
i f
) * fluid.layers.tanh(j) ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
self.hidden_array[k] = m self.hidden_array[k] = m
self.cell_array[k] = c self.cell_array[k] = c
self._input = m self._input = m
......
...@@ -94,7 +94,7 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): ...@@ -94,7 +94,7 @@ class TestSyncBatchNormOpTraining(unittest.TestCase):
bn = fluid.layers.cast(bn, 'float32') bn = fluid.layers.cast(bn, 'float32')
else: else:
bn = fluid.layers.cast(bn, 'float64') bn = fluid.layers.cast(bn, 'float64')
sigmoid = fluid.layers.sigmoid(bn) sigmoid = paddle.nn.functional.sigmoid(bn)
out = fluid.layers.reduce_sum(sigmoid) out = fluid.layers.reduce_sum(sigmoid)
if not sync_bn: if not sync_bn:
out = out / core.get_cuda_device_count() out = out / core.get_cuda_device_count()
......
...@@ -59,7 +59,7 @@ def bow_net( ...@@ -59,7 +59,7 @@ def bow_net(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
) )
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow) bow_tanh = paddle.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
from functools import partial from functools import partial
import numpy as np import numpy as np
import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
...@@ -156,7 +157,7 @@ def multi_head_attention( ...@@ -156,7 +157,7 @@ def multi_head_attention(
# So, here define the softmax for temporary solution. # So, here define the softmax for temporary solution.
def __softmax(x, eps=1e-9): def __softmax(x, eps=1e-9):
exp_out = layers.exp(x=x) exp_out = paddle.exp(x=x)
sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False) sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
return layers.elementwise_div(x=exp_out, y=sum_out, axis=0) return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)
......
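The hand-rolled `__softmax` above exponentiates without subtracting the row max, so it can overflow for large logits; a numerically stable sketch of the same reference (paddle.nn.functional.softmax is the standard API when the workaround is no longer needed):

    import paddle

    def softmax_ref(x):
        # Subtract the row max before exponentiating to avoid overflow.
        x = x - paddle.max(x, axis=-1, keepdim=True)
        exp_out = paddle.exp(x)
        return exp_out / paddle.sum(exp_out, axis=-1, keepdim=True)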
...@@ -209,7 +209,7 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): ...@@ -209,7 +209,7 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase):
global_norm_var = global_norm_var_normal + global_norm_var_moe global_norm_var = global_norm_var_normal + global_norm_var_moe
params_and_grads = [] params_and_grads = []
global_norm_var = layers.sqrt(global_norm_var) global_norm_var = paddle.sqrt(global_norm_var)
max_global_norm = layers.fill_constant( max_global_norm = layers.fill_constant(
shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm
) )
......
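`paddle.sqrt` here computes the global gradient norm for clipping; the surrounding logic reduces to this sketch (gradient tensors are illustrative):

    import paddle

    grads = [paddle.rand([3, 3]) for _ in range(2)]
    clip_norm = 1.0
    global_norm = paddle.sqrt(
        paddle.add_n([paddle.sum(paddle.square(g)) for g in grads]))
    # Scale gradients down only when the global norm exceeds clip_norm.
    scale = clip_norm / paddle.maximum(global_norm, paddle.to_tensor(clip_norm))
    clipped = [g * scale for g in grads]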
...@@ -557,7 +557,7 @@ class ModelAverage(Optimizer): ...@@ -557,7 +557,7 @@ class ModelAverage(Optimizer):
sum = layers.cast( sum = layers.cast(
x=sum, dtype='float32' if self._dtype is None else self._dtype x=sum, dtype='float32' if self._dtype is None else self._dtype
) )
layers.ops._elementwise_div(x=sum, y=tmp, out=param) paddle.tensor.ops._elementwise_div(x=sum, y=tmp, out=param)
def _add_average_restore_op(self, block, param): def _add_average_restore_op(self, block, param):
param = block._clone_variable(param) param = block._clone_variable(param)
......
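`paddle.tensor.ops._elementwise_div` is a private op kept here because it writes into an existing `out` tensor; the public equivalent, without the in-place output, is `paddle.divide`:

    import paddle

    s = paddle.to_tensor([2.0, 4.0])
    tmp = paddle.to_tensor([2.0, 2.0])
    param = paddle.divide(s, tmp)  # -> [1.0, 2.0]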