Commit 60aaf39e authored by michaelowenliu

update SyncBN package and add Activation api

Parent 5dcd2de1
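
The diff below tracks Paddle's API migration: SyncBatchNorm now comes from paddle.nn rather than paddle.fluid.dygraph, elementwise ReLU moves from fluid.layers.relu to paddle.nn.functional.relu, and a small Activation wrapper over paddle.nn.layer.activation is introduced. A minimal sketch of the new-style calls, assuming a Paddle 2.0-era install (the tensor shape is illustrative, not from the commit):

    import paddle
    import paddle.nn.functional as F
    from paddle.nn import SyncBatchNorm

    bn = SyncBatchNorm(16)            # 16 input channels; syncs BN statistics across GPUs in distributed training
    x = paddle.rand([4, 16, 32, 32])  # illustrative NCHW batch
    y = F.relu(bn(x))                 # functional ReLU replaces fluid.layers.relu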
@@ -13,24 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import paddle.nn.functional as F
 from paddle import fluid
 from paddle.fluid import dygraph
 from paddle.fluid.dygraph import Conv2D
-from paddle.fluid.dygraph import SyncBatchNorm as BatchNorm
-import cv2
-import os
-import sys
+from paddle.nn import SyncBatchNorm as BatchNorm
+from paddle.nn.layer import activation
 
 
 class ConvBnRelu(dygraph.Layer):
     def __init__(self,
                  num_channels,
                  num_filters,
                  filter_size,
                  using_sep_conv=False,
                  **kwargs):
         super(ConvBnRelu, self).__init__()
 
         if using_sep_conv:
@@ -41,16 +39,16 @@ class ConvBnRelu(dygraph.Layer):
         else:
             self.conv = Conv2D(num_channels,
                                num_filters,
                                filter_size,
                                **kwargs)
 
         self.batch_norm = BatchNorm(num_filters)
 
     def forward(self, x):
         x = self.conv(x)
         x = self.batch_norm(x)
-        x = fluid.layers.relu(x)
+        x = F.relu(x)
         return x
 
@@ -81,7 +79,7 @@ class ConvReluPool(dygraph.Layer):
     def forward(self, x):
         x = self.conv(x)
-        x = fluid.layers.relu(x)
+        x = F.relu(x)
         x = fluid.layers.pool2d(x, pool_size=2, pool_type="max", pool_stride=2)
         return x
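
Both relu call sites are migrated to F.relu here, while fluid.layers.pool2d stays on the old API; for reference, a functional max-pool equivalent exists in Paddle 2.x (a possible follow-up step, not part of this commit):

    # Paddle 2.x equivalent of fluid.layers.pool2d(x, pool_size=2, pool_type="max", pool_stride=2);
    # an assumption about a later migration step, not a change this commit makes:
    x = F.max_pool2d(x, kernel_size=2, stride=2)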
@@ -106,15 +104,15 @@ class DepthwiseConvBnRelu(dygraph.Layer):
                  **kwargs):
         super(DepthwiseConvBnRelu, self).__init__()
         self.depthwise_conv = ConvBn(num_channels,
                                      num_filters=num_channels,
                                      filter_size=filter_size,
                                      groups=num_channels,
                                      use_cudnn=False,
                                      **kwargs)
         self.piontwise_conv = ConvBnRelu(num_channels,
                                          num_filters,
                                          filter_size=1,
                                          groups=1)
 
     def forward(self, x):
         x = self.depthwise_conv(x)
@@ -122,20 +120,43 @@ class DepthwiseConvBnRelu(dygraph.Layer):
         return x
 
-def compute_loss(logits, label, ignore_index=255):
-    mask = label != ignore_index
-    mask = fluid.layers.cast(mask, 'float32')
-    loss, probs = fluid.layers.softmax_with_cross_entropy(
-        logits,
-        label,
-        ignore_index=ignore_index,
-        return_softmax=True,
-        axis=1)
-
-    loss = loss * mask
-    avg_loss = fluid.layers.mean(loss) / (
-        fluid.layers.mean(mask) + 1e-5)
-
-    label.stop_gradient = True
-    mask.stop_gradient = True
-    return avg_loss
\ No newline at end of file
+class Activation(fluid.dygraph.Layer):
+    """
+    The wrapper of activations.
+
+    For example:
+    >>> relu = Activation("relu")
+    >>> print(relu)
+    <class 'paddle.nn.layer.activation.ReLU'>
+    >>> sigmoid = Activation("sigmoid")
+    >>> print(sigmoid)
+    <class 'paddle.nn.layer.activation.Sigmoid'>
+    >>> not_exist_one = Activation("not_exist_one")
+    KeyError: "not_exist_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink',
+    'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax',
+    'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])"
+
+    Args:
+        act (str): the activation name in lowercase.
+    """
+
+    def __init__(self, act=None):
+        super(Activation, self).__init__()
+
+        self._act = act
+        upper_act_names = activation.__all__
+        lower_act_names = [act.lower() for act in upper_act_names]
+        act_dict = dict(zip(lower_act_names, upper_act_names))
+
+        if act is not None:
+            if act in act_dict.keys():
+                act_name = act_dict[act]
+                self.act_func = eval("activation.{}()".format(act_name))
+            else:
+                raise KeyError("{} does not exist in the current {}".format(act, act_dict.keys()))
+
+    def forward(self, x):
+        if self._act is not None:
+            return self.act_func(x)
+        else:
+            return x
\ No newline at end of file
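
The new Activation layer lower-cases the class names exported by paddle.nn.layer.activation, looks the requested name up, and instantiates the matching class. A hedged usage sketch (eager mode, Paddle 2.0-era; tensor values are illustrative):

    import paddle

    relu = Activation("relu")               # resolves to activation.ReLU()
    x = paddle.to_tensor([-1.0, 0.0, 2.0])
    print(relu(x).numpy())                  # -> [0. 0. 2.]

    identity = Activation()                 # act=None: forward() returns x unchanged
    print(identity(x).numpy())              # -> [-1.  0.  2.]

Since act_name is always one of the module's own exported names, getattr(activation, act_name)() would achieve the same instantiation as the eval call without evaluating generated source text; that is a suggested alternative, not what the commit ships.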