Commit 2710584f authored by chengduoZH

fix above comments

Parent f715c740
@@ -901,20 +901,14 @@ class Conv3D(Cfg):
padding_z=None,
stride_z=None):
self.add_keys(locals())
if filter_size_y is None:
self.filter_size_y = filter_size
if padding_y is None:
self.padding_y = padding
if stride_y is None:
self.stride_y = stride
self.filter_size_y = filter_size_y if filter_size_y else filter_size
self.filter_size_z = filter_size_z if filter_size_z else filter_size
self.padding_y = padding_y if padding_y else padding
self.padding_z = padding_z if padding_z else padding
self.stride_y = stride_y if stride_y else stride
self.stride_z = stride_z if stride_z else stride
if output_x is not None:
config_assert(output_x <= 0)
if filter_size_z is None:
self.filter_size_z = filter_size
if padding_z is None:
self.padding_z = padding
if stride_z is None:
self.stride_z = stride
@config_class
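The new Conv3D config collapses the per-axis defaulting into one-line fallbacks: every y/z hyper-parameter falls back to its x counterpart when not given. A minimal standalone sketch of that pattern (illustrative class and names, not the actual Cfg machinery):

class Conv3DConfigSketch(object):
    def __init__(self, filter_size, padding=0, stride=1,
                 filter_size_y=None, filter_size_z=None,
                 padding_y=None, padding_z=None,
                 stride_y=None, stride_z=None):
        # y/z values fall back to the x value when left unset.
        # Note: these are truthiness tests, so an explicit 0 also falls back.
        self.filter_size_y = filter_size_y if filter_size_y else filter_size
        self.filter_size_z = filter_size_z if filter_size_z else filter_size
        self.padding_y = padding_y if padding_y else padding
        self.padding_z = padding_z if padding_z else padding
        self.stride_y = stride_y if stride_y else stride
        self.stride_z = stride_z if stride_z else stride

cfg = Conv3DConfigSketch(filter_size=3, padding=1, stride=2)
assert (cfg.filter_size_z, cfg.padding_y, cfg.stride_z) == (3, 1, 2)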
@@ -1206,10 +1200,10 @@ def get_img_size(input_layer_name, channels):
def get_img3d_size(input_layer_name, channels):
input = g_layer_map[input_layer_name]
img_pixels = input.size / channels
img_size = input.width if input.width > 0 else int(img_pixels**0.5)
img_size_y = input.height if input.height > 0 else int(img_pixels /
img_size)
img_size_z = input.depth if input.depth > 1 else 1
img_size = input.width
img_size_y = input.height
img_size_z = input.depth
config_assert(
img_size * img_size_y * img_size_z == img_pixels,
"Input layer %s: Incorrect input image size %d * %d * %d for input image pixels %d"
@@ -2000,8 +1994,10 @@ class ConvLayer(ConvLayerBase):
layer_type = 'cudnn_conv'
@config_layer('conv_3d')
class Conv3DLayerBase(LayerBase):
@config_layer('convt')
class ConvTransLayerBase(LayerBase):
layer_type = 'convt'
def __init__(self,
name,
inputs=[],
@@ -2009,7 +2005,7 @@ class Conv3DLayerBase(LayerBase):
num_filters=None,
shared_biases=False,
**xargs):
super(Conv3DLayerBase, self).__init__(
super(ConvTransLayerBase, self).__init__(
name, self.layer_type, 0, inputs=inputs, **xargs)
if num_filters is not None:
@@ -2018,12 +2014,17 @@ class Conv3DLayerBase(LayerBase):
use_gpu = int(g_command_config_args.get("use_gpu", 0))
parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
# Automatically select cudnn_type for GPU and exconv for CPU
# if set type=conv, but still reserve the way user specify
# exconv or cudnn_conv manually.
if self.layer_type == "cudnn_conv3d":
config_assert(use_gpu, "cudnn_conv3d only support GPU")
# Automatically select cudnn_type for GPU and exconvt for CPU
# if set type=exconvt, but still reserve the way user specify
# exconvt or cudnn_convt manually.
if self.layer_type == "cudnn_convt":
config_assert(use_gpu, "cudnn_convt only support GPU")
if (use_gpu == 1 and self.layer_type != "exconvt" and
(parallel_nn == 0 or self.config.device > -1)):
self.layer_type = "cudnn_convt"
else:
self.layer_type = "exconvt"
# need to specify layer in config
self.config.type = self.layer_type
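The type-selection logic above mirrors the 2D convolution layers: cudnn_convt requires a GPU build, and when the type is left generic the GPU path resolves to cudnn_convt (unless parallel_nn places the layer on CPU) while everything else falls back to exconvt. A rough, standalone restatement of that rule:

def select_convt_type(layer_type, use_gpu, parallel_nn, device):
    # cudnn_convt is only valid on GPU.
    if layer_type == "cudnn_convt":
        assert use_gpu, "cudnn_convt only supports GPU"
    if (use_gpu == 1 and layer_type != "exconvt" and
            (parallel_nn == 0 or device > -1)):
        return "cudnn_convt"
    return "exconvt"

assert select_convt_type("convt", use_gpu=1, parallel_nn=0, device=-1) == "cudnn_convt"
assert select_convt_type("convt", use_gpu=0, parallel_nn=0, device=-1) == "exconvt"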
@@ -2032,15 +2033,17 @@ class Conv3DLayerBase(LayerBase):
for input_index in xrange(len(self.inputs)):
input_layer = self.get_input_layer(input_index)
parse_conv(
self.inputs[input_index].conv,
input_layer.name,
self.config.inputs[input_index].conv_conf,
num_filters,
trans=True)
conv_conf = self.config.inputs[input_index].conv_conf
parse_conv3d(
self.inputs[input_index].conv, input_layer.name, conv_conf,
num_filters
) # for z-axis pad:0, stride:1, filter_size:1, img_size:1
psize = self.calc_parameter_size(conv_conf)
self.create_input_parameter(input_index, psize)
self.set_cnn_layer(name, conv_conf.output_z, conv_conf.output_y,
conv_conf.output_x, self.config.num_filters)
self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
self.config.num_filters)
psize = self.config.size
if shared_biases:
@@ -2048,62 +2051,42 @@ class Conv3DLayerBase(LayerBase):
self.create_bias_parameter(bias, psize, [psize, 1])
def calc_parameter_size(self, conv_conf):
return self.config.num_filters * conv_conf.filter_channels \
* (conv_conf.filter_size * conv_conf.filter_size_y \
* conv_conf.filter_size_z)
return conv_conf.channels * conv_conf.filter_channels \
* (conv_conf.filter_size * conv_conf.filter_size_y)
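For the 2D transposed layer the weight count drops the z dimension and is based on conv_conf.channels rather than num_filters; the 3D formula (kept further down for Conv3DLayerBase) multiplies all three filter extents. A quick arithmetic sketch of the 3D count, assuming filter_channels is channels divided by groups:

# Worked example: num_filters * filter_channels * (fx * fy * fz)
num_filters, channels, groups = 16, 3, 1
filter_channels = channels // groups   # assumption for this sketch
fx = fy = fz = 3
weight_params = num_filters * filter_channels * (fx * fy * fz)
assert weight_params == 1296           # 16 * 3 * 27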
def set_layer_height_width(self, depth, height, width):
self.config.depth = depth
self.config.height = height
self.config.width = width
def set_cnn_layer(self,
input_layer_name,
depth,
height,
width,
channels,
is_print=True):
size = depth * height * width * channels
self.set_layer_size(size)
self.set_layer_height_width(depth, height, width)
if is_print:
print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
(input_layer_name, channels, depth, height, width, size))
@config_layer('exconvt')
class ConvTransLayer(ConvTransLayerBase):
layer_type = 'exconvt'
@config_layer('conv3d')
class Conv3DLayer(Conv3DLayerBase):
layer_type = 'conv3d'
@config_layer('cudnn_convt')
class ConvTransLayer(ConvTransLayerBase):
layer_type = 'cudnn_convt'
@config_layer('convt_3d')
class Conv3DTransLayerBase(LayerBase):
@config_layer('conv_3d')
class Conv3DLayerBase(LayerBase):
def __init__(self,
name,
inputs=[],
bias=True,
num_filters=None,
shared_biases=False,
shared_biases=True,
**xargs):
super(Conv3DTransLayerBase, self).__init__(
super(Conv3DLayerBase, self).__init__(
name, self.layer_type, 0, inputs=inputs, **xargs)
if num_filters is not None:
self.config.num_filters = num_filters
use_gpu = int(g_command_config_args.get("use_gpu", 0))
parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
# Automatically select cudnn_type for GPU and exconv for CPU
# if set type=conv, but still reserve the way user specify
# exconv or cudnn_conv manually.
if self.layer_type == "cudnn_deconv3d":
config_assert(use_gpu, "cudnn_conv3d only support GPU")
# need to specify layer in config
self.config.type = self.layer_type
trans = False
if self.config.type == "deconv3d":
trans = True
if shared_biases is not None:
self.config.shared_biases = shared_biases
@@ -2115,12 +2098,17 @@ class Conv3DTransLayerBase(LayerBase):
input_layer.name,
conv_conf,
num_filters,
trans=True
trans=trans
) # for z-axis pad:0, stride:1, filter_size:1, img_size:1
psize = self.calc_parameter_size(conv_conf)
self.create_input_parameter(input_index, psize)
self.set_cnn_layer(name, conv_conf.img_size_z, conv_conf.img_size_y,
conv_conf.img_size, self.config.num_filters)
if trans:
self.set_cnn_layer(name, conv_conf.img_size_z,
conv_conf.img_size_y, conv_conf.img_size,
self.config.num_filters)
else:
self.set_cnn_layer(name, conv_conf.output_z, conv_conf.output_y,
conv_conf.output_x, self.config.num_filters)
psize = self.config.size
if shared_biases:
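The trans branch reflects which side of the convolution relation the layer's output sits on: a forward conv3d outputs output_x/y/z, while deconv3d (the transpose) outputs the larger img_size_x/y/z. Roughly, per axis and ignoring ceil/caffe-mode variants, the relation is sketched below:

def conv_output_size(img_size, filter_size, padding, stride):
    # forward convolution: output shrinks
    return (img_size + 2 * padding - filter_size) // stride + 1

def deconv_output_size(output_size, filter_size, padding, stride):
    # transposed convolution recovers the image-sized side
    return (output_size - 1) * stride + filter_size - 2 * padding

assert conv_output_size(47, 3, 1, 2) == 24
assert deconv_output_size(24, 3, 1, 2) == 47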
@@ -2132,11 +2120,6 @@ class Conv3DTransLayerBase(LayerBase):
* (conv_conf.filter_size * conv_conf.filter_size_y \
* conv_conf.filter_size_z)
def set_layer_height_width(self, depth, height, width):
self.config.depth = depth
self.config.height = height
self.config.width = width
def set_cnn_layer(self,
input_layer_name,
depth,
@@ -2146,86 +2129,21 @@ class Conv3DTransLayerBase(LayerBase):
is_print=True):
size = depth * height * width * channels
self.set_layer_size(size)
self.set_layer_height_width(depth, height, width)
self.set_layer_height_width(height, width)
self.set_layer_depth(depth)
if is_print:
print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
(input_layer_name, channels, depth, height, width, size))
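With this change the depth is recorded through a separate set_layer_depth call instead of being folded into set_layer_height_width; the layer size is still depth * height * width * channels. A tiny bookkeeping sketch with placeholder setters:

class LayerSizeSketch(object):
    # Placeholder setters standing in for the real LayerBase helpers.
    def set_layer_size(self, size):
        self.size = size
    def set_layer_height_width(self, height, width):
        self.height, self.width = height, width
    def set_layer_depth(self, depth):
        self.depth = depth
    def set_cnn_layer(self, depth, height, width, channels):
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        return size

assert LayerSizeSketch().set_cnn_layer(6, 48, 42, 16) == 193536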
@config_layer('deconv3d')
class DeConv3DLayer(Conv3DTransLayerBase):
layer_type = 'deconv3d'
@config_layer('convt')
class ConvTransLayerBase(LayerBase):
layer_type = 'convt'
def __init__(self,
name,
inputs=[],
bias=True,
num_filters=None,
shared_biases=False,
**xargs):
super(ConvTransLayerBase, self).__init__(
name, self.layer_type, 0, inputs=inputs, **xargs)
if num_filters is not None:
self.config.num_filters = num_filters
use_gpu = int(g_command_config_args.get("use_gpu", 0))
parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
# Automatically select cudnn_type for GPU and exconvt for CPU
# if set type=exconvt, but still reserve the way user specify
# exconvt or cudnn_convt manually.
if self.layer_type == "cudnn_convt":
config_assert(use_gpu, "cudnn_convt only support GPU")
if (use_gpu == 1 and self.layer_type != "exconvt" and
(parallel_nn == 0 or self.config.device > -1)):
self.layer_type = "cudnn_convt"
else:
self.layer_type = "exconvt"
# need to specify layer in config
self.config.type = self.layer_type
if shared_biases is not None:
self.config.shared_biases = shared_biases
for input_index in xrange(len(self.inputs)):
input_layer = self.get_input_layer(input_index)
parse_conv(
self.inputs[input_index].conv,
input_layer.name,
self.config.inputs[input_index].conv_conf,
num_filters,
trans=True)
conv_conf = self.config.inputs[input_index].conv_conf
psize = self.calc_parameter_size(conv_conf)
self.create_input_parameter(input_index, psize)
self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
self.config.num_filters)
psize = self.config.size
if shared_biases:
psize = self.config.num_filters
self.create_bias_parameter(bias, psize, [psize, 1])
def calc_parameter_size(self, conv_conf):
return conv_conf.channels * conv_conf.filter_channels \
* (conv_conf.filter_size * conv_conf.filter_size_y)
@config_layer('exconvt')
class ConvTransLayer(ConvTransLayerBase):
layer_type = 'exconvt'
@config_layer('conv3d')
class Conv3DLayer(Conv3DLayerBase):
layer_type = 'conv3d'
@config_layer('cudnn_convt')
class ConvTransLayer(ConvTransLayerBase):
layer_type = 'cudnn_convt'
@config_layer('deconv3d')
class Conv3DLayer(Conv3DLayerBase):
layer_type = 'deconv3d'
@config_layer('norm')
......
@@ -6161,12 +6161,6 @@ def img_conv3d_layer(input,
param_attr=None,
shared_biases=True,
layer_attr=None,
filter_size_y=None,
stride_y=None,
padding_y=None,
filter_size_z=None,
stride_z=None,
padding_z=None,
trans=False,
layer_type=None):
"""
@@ -6175,7 +6169,7 @@ def img_conv3d_layer(input,
.. code-block:: python
conv = img_conv3d_layer(input=data, filter_size=1, filter_size_y=1,
conv = img_conv3d_layer(input=data, filter_size=1,
num_channels=8,
num_filters=16, stride=1,
bias_attr=False,
@@ -6185,13 +6179,8 @@ def img_conv3d_layer(input,
:type name: basestring
:param input: Layer Input.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel. Or input a tuple for
two image dimension.
:param filter_size: The x dimension of a filter kernel. Or input a list.
:type filter_size: int|tuple|list
:param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle
currently supports rectangular filters, the filter's
shape will be (filter_size, filter_size_y).
:type filter_size_y: int|None
:param num_filters: Each filter group's number of filter
:param act: Activation type. Default is tanh
:type act: BaseActivation
@@ -6200,13 +6189,9 @@ def img_conv3d_layer(input,
:param stride: The x dimension of the stride. Or input a tuple for two image
dimension.
:type stride: int|tuple|list
:param stride_y: The y dimension of the stride.
:type stride_y: int
:param padding: The x dimension of the padding. Or input a tuple for two
image dimension
:type padding: int|tuple|list
:param padding_y: The y dimension of the padding.
:type padding_y: int
:param bias_attr: Convolution bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|False
@@ -6233,47 +6218,26 @@ def img_conv3d_layer(input,
assert input.num_filters is not None
num_channels = input.num_filters
if filter_size_y is None:
if isinstance(filter_size, collections.Sequence):
assert len(filter_size) == 2
filter_size, filter_size_y = filter_size
else:
filter_size_y = filter_size
if filter_size_z is None:
if isinstance(filter_size, collections.Sequence):
assert len(filter_size) == 2
filter_size, filter_size_z = filter_size
else:
filter_size_z = filter_size
if stride_y is None:
if isinstance(stride, collections.Sequence):
assert len(stride) == 2
stride, stride_y = stride
else:
stride_y = stride
if stride_z is None:
if isinstance(stride, collections.Sequence):
assert len(stride) == 2
stride, stride_z = stride
else:
stride_z = stride
if isinstance(filter_size, collections.Sequence):
assert len(filter_size) == 3
filter_size, filter_size_y, filter_size_z = filter_size
else:
filter_size_y = filter_size
filter_size_z = filter_size
if padding_y is None:
if isinstance(padding, collections.Sequence):
assert len(padding) == 2
padding, padding_y = padding
else:
padding_y = padding
if isinstance(stride, collections.Sequence):
assert len(stride) == 3
stride, stride_y, stride_z = stride
else:
stride_y = stride
stride_z = stride
if padding_z is None:
if isinstance(padding, collections.Sequence):
assert len(padding) == 2
padding, padding_z = padding
else:
padding_z = padding
if isinstance(padding, collections.Sequence):
assert len(padding) == 3
padding, padding_y, padding_z = padding
else:
padding_y = padding
padding_z = padding
if param_attr.attr.get('initial_smart'):
# special initial for conv layers.
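The filter_size/stride/padding handling above boils down to: accept either a single int (broadcast to x, y, z) or a 3-element sequence. A standalone sketch of that normalization (using a plain isinstance check instead of collections.Sequence):

def expand_3d(value):
    # Accept an int or a 3-element list/tuple and return (x, y, z).
    if isinstance(value, (list, tuple)):
        assert len(value) == 3
        return tuple(value)
    return (value, value, value)

assert expand_3d(3) == (3, 3, 3)
assert expand_3d([3, 2, 1]) == (3, 2, 1)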
......
@@ -14,23 +14,44 @@ padding_y = 1
padding_z = 1
groups = 1
data = data_layer(
name='data1', size=12096 * num_channels, height=48, width=42, depth=6)
data1 = data_layer(name='data1', size=2016 * num_channels, height=48, width=42)
conv3d = img_conv3d_layer(
img_conv_layer(
input=data1,
filter_size=filter_size,
num_channels=num_channels,
num_filters=16,
stride=stride,
padding=padding,
act=LinearActivation(),
bias_attr=False)
data = data_layer(
name='data', size=12096 * num_channels, height=48, width=42, depth=6)
# first
conv3d_1 = img_conv3d_layer(
input=data,
name='conv3d_1',
num_filters=16,
num_channels=num_channels,
filter_size=filter_size,
filter_size_y=filter_size,
filter_size_z=filter_size,
stride=stride,
stride_y=stride_y,
stride_z=stride_z,
padding=padding,
padding_y=padding_y,
padding_z=padding_z,
groups=groups,
bias_attr=True,
shared_biases=True,
trans=False,
layer_type="conv3d",
act=LinearActivation())
# second
conv3d_2 = img_conv3d_layer(
input=data,
name='conv3d_2',
num_filters=16,
num_channels=num_channels,
filter_size=[filter_size, filter_size_y, filter_size_z],
stride=[stride, stride_y, stride_z],
padding=[padding, padding_y, padding_z],
groups=groups,
bias_attr=True,
shared_biases=True,
@@ -38,61 +59,33 @@ conv3d = img_conv3d_layer(
layer_type="conv3d",
act=LinearActivation())
deconv3d = img_conv3d_layer(
# first
deconv3d_1 = img_conv3d_layer(
input=data,
name='deconv3d_1',
num_filters=16,
num_channels=num_channels,
filter_size=filter_size,
filter_size_y=filter_size,
filter_size_z=filter_size,
stride=stride,
stride_y=stride_y,
stride_z=stride_z,
padding=padding,
padding_y=padding_y,
padding_z=padding_z,
groups=groups,
bias_attr=True,
shared_biases=True,
trans=True,
trans=False,
layer_type="deconv3d",
act=LinearActivation())
data = data_layer(name="input", size=8 * 16 * 16)
conv1 = img_conv_layer(
input=data,
filter_size=1,
filter_size_y=1,
num_channels=8,
num_filters=16,
stride=1,
bias_attr=False,
act=ReluActivation(),
layer_type="exconv")
conv2 = img_conv_layer(
input=data,
filter_size=1,
filter_size_y=1,
num_channels=8,
num_filters=16,
stride=1,
bias_attr=False,
act=ReluActivation(),
layer_type="exconv")
concat = concat_layer(input=[conv1, conv2])
conv = img_conv_layer(
# second
deconv3d_2 = img_conv3d_layer(
input=data,
filter_size=1,
filter_size_y=1,
num_channels=8,
name='deconv3d_2',
num_filters=16,
stride=1,
num_channels=num_channels,
filter_size=[filter_size, filter_size_y, filter_size_z],
stride=[stride, stride_y, stride_z],
padding=[padding, padding_y, padding_z],
groups=groups,
bias_attr=True,
act=LinearActivation(),
groups=2,
layer_type="exconv")
outputs(concat, conv)
shared_biases=True,
trans=False,
layer_type="deconv3d",
act=LinearActivation())