Commit 07c1ea25, authored by wangyang59

python interface for convTransProjection and convTransOperator

Parent 6b7f6474
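In short, the commit threads a single `trans` flag through the Python config helpers so a transposed convolution ("deconv") can be used wherever a convolution operator or projection already fits, selecting the new `ConvTransOperator`/`ConvTransProjection` config classes under the hood. A minimal usage sketch, adapted from the projections test config changed at the bottom of this diff (layer names and data sizes are illustrative, not from the commit):

    from paddle.trainer_config_helpers import *

    # Illustrative input sizes; any consistent image/filter sizes work.
    img = data_layer(name='image', size=16 * 16 * 1)
    flt = data_layer(name='filter', size=3 * 3 * 1 * 64)

    with mixed_layer() as m:
        # Ordinary convolution operator, as before this commit.
        m += conv_operator(
            img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)

    with mixed_layer() as mt:
        # New in this commit: trans=True switches to the transposed variants.
        mt += conv_operator(
            img=img,
            filter=flt,
            num_filters=64,
            num_channels=1,
            filter_size=3,
            stride=2,
            padding=1,
            trans=True)
        mt += conv_projection(
            img, filter_size=3, num_filters=64, num_channels=1,
            stride=2, padding=1, trans=True)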
ConvBaseOperator.h:
@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
 #include "Operator.h"
 #include "paddle/math/MathUtils.h"
@@ -44,10 +45,8 @@ public:
     hl_destroy_filter_descriptor(filterDesc_);
     hl_destroy_convolution_descriptor(convDesc_);
   }
 
-  virtual void forward();
-  virtual void backward();
-private:
+protected:
   /**
    * Get convolution parameters from layer config and
    * initialize member variables.
...
ConvOperator.h:
@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
 #include "ConvBaseOperator.h"
 #include "paddle/math/MathUtils.h"
@@ -35,8 +36,8 @@ public:
    * Free workspace in device and destroy cudnn tensor descriptor.
    */
   virtual ~ConvOperator() {}
 
-  virtual void forward();
-  virtual void backward();
+  void forward() override;
+  void backward() override;
 };
 }  // namespace paddle
ConvTransOperator.h:
@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
 #include "ConvBaseOperator.h"
 #include "paddle/math/MathUtils.h"
@@ -35,8 +36,8 @@ public:
    * Free workspace in device and destroy cudnn tensor descriptor.
    */
   virtual ~ConvTransOperator() {}
 
-  virtual void forward();
-  virtual void backward();
+  void forward() override;
+  void backward() override;
 };
 }  // namespace paddle
test_LayerGrad.cpp:
@@ -1503,16 +1503,20 @@ TEST(Layer, BatchNormalizationLayer) {
 #endif
 }
 
-TEST(Operator, conv) {
+void testConvOperator(bool isDeconv) {
   TestConfig config;
   const int NUM_FILTERS = 16;
   const int FILTER_SIZE = 2;
   const int FILTER_SIZE_Y = 3;
   const int CHANNELS = 3;
   const int IMAGE_SIZE = 16;
-  const int IMAGE_SIZE_Y = 8;
+  const int IMAGE_SIZE_Y = 9;
   OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
-  operatorConf.set_type("conv");
+  if (isDeconv) {
+    operatorConf.set_type("convt");
+  } else {
+    operatorConf.set_type("conv");
+  }
   ConvConfig* conv = operatorConf.mutable_conv_conf();
   operatorConf.set_num_filters(NUM_FILTERS);
   conv->set_filter_size(FILTER_SIZE);
...
@@ -1523,7 +1527,6 @@
   conv->set_stride(2);
   conv->set_stride_y(2);
   conv->set_groups(1);
-  conv->set_filter_channels(conv->channels() / conv->groups());
   conv->set_img_size(IMAGE_SIZE);
   conv->set_img_size_y(IMAGE_SIZE_Y);
   conv->set_output_x(outputSize(conv->img_size(),
...
@@ -1536,11 +1539,22 @@
                                 conv->padding_y(),
                                 conv->stride_y(),
                                 /* caffeMode */ true));
-  config.layerConfig.set_size(conv->output_x() * conv->output_y() *
-                              NUM_FILTERS);
-  config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
+
+  if (isDeconv) {
+    conv->set_filter_channels(NUM_FILTERS / conv->groups());
+    config.inputDefs.push_back({INPUT_DATA,
+                                "layer_0",
+                                conv->output_x() * conv->output_y() * CHANNELS,
+                                0});
+    config.layerConfig.set_size(IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS);
+  } else {
+    conv->set_filter_channels(conv->channels() / conv->groups());
+    config.inputDefs.push_back(
+        {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
+    config.layerConfig.set_size(conv->output_x() * conv->output_y() *
+                                NUM_FILTERS);
+  }
   config.inputDefs.push_back(
       {INPUT_DATA,
        "layer_1",
...
@@ -1552,6 +1566,11 @@
   testOperatorGrad(config, operatorConf, 100, /*useGpu*/ true, false);
 }
 
+TEST(Operator, conv) {
+  testConvOperator(/*isDeconv*/ true);
+  testConvOperator(/*isDeconv*/ false);
+}
+
 TEST(Layer, FeatureMapExpandLayer) {
   TestConfig config;
   config.layerConfig.set_type("featmap_expand");
...
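Note how the deconv branch of the test swaps the conv branch's shapes: the "layer_0" input is sized output_x * output_y * CHANNELS and the layer output is IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS, since a transposed convolution inverts the conv geometry. A sketch of that relation in Python, assuming the usual caffeMode size formula behind outputSize() in paddle/math/MathUtils.h (the exact formula here is an assumption, not quoted from the commit):

    def output_size(image_size, filter_size, padding, stride):
        # Assumed caffeMode convolution output size, matching the
        # outputSize(..., /* caffeMode */ true) calls in the test above.
        return (image_size - filter_size + 2 * padding) // stride + 1

    def image_size(out_size, filter_size, padding, stride):
        # Inverse relation used by transposed convolution: the "image" side
        # of a deconv is what a conv with the same geometry would consume.
        return (out_size - 1) * stride + filter_size - 2 * padding

    # With the test's IMAGE_SIZE = 16, FILTER_SIZE = 2, stride = 2 and zero
    # padding (the padding value is illustrative; the test sets its own):
    assert output_size(16, 2, 0, 2) == 8
    assert image_size(8, 2, 0, 2) == 16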
config_parser.py:
@@ -686,25 +686,17 @@ class ContextProjection(Projection):
 @config_class
-class ConvProjection(Projection):
-    type = 'conv'
-
+class ConvBaseProjection(Projection):
     def __init__(self,
                  input_layer_name,
                  num_filters=None,
                  conv_conf=None,
                  **xargs):
-        super(ConvProjection, self).__init__(input_layer_name, **xargs)
+        super(ConvBaseProjection, self).__init__(input_layer_name, **xargs)
 
         if num_filters is not None:
             self.proj_conf.num_filters = num_filters
 
-        parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
-                   num_filters)
-
-        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
-                                     self.proj_conf.conv_conf.output_y * \
-                                     num_filters
-
     def calc_output_size(self, input_layer_config):
         return self.proj_conf.output_size
...
@@ -723,6 +715,46 @@
         return None
 
 
+@config_class
+class ConvProjection(ConvBaseProjection):
+    type = 'conv'
+
+    def __init__(self,
+                 input_layer_name,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvProjection, self).__init__(input_layer_name, **xargs)
+
+        parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
+                   num_filters)
+        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
+                                     self.proj_conf.conv_conf.output_y * \
+                                     num_filters
+
+
+@config_class
+class ConvTransProjection(ConvBaseProjection):
+    type = 'convt'
+
+    def __init__(self,
+                 input_layer_name,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvTransProjection, self).__init__(input_layer_name, **xargs)
+
+        parse_conv(
+            conv_conf,
+            input_layer_name,
+            self.proj_conf.conv_conf,
+            num_filters,
+            trans=True)
+        self.proj_conf.output_size = self.proj_conf.conv_conf.img_size_y * \
+                                     self.proj_conf.conv_conf.img_size * \
+                                     num_filters
+
+
 # Define a operator for mixed layer
 @config_class
 class Operator(Cfg):
...
@@ -789,6 +821,36 @@ class ConvOperator(Operator):
         return self.operator_conf.output_size
 
 
+@config_class
+class ConvTransOperator(Operator):
+    type = 'convt'
+
+    def __init__(self,
+                 input_layer_names,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvTransOperator, self).__init__(input_layer_names, **xargs)
+        if num_filters is not None:
+            self.operator_conf.num_filters = num_filters
+
+        parse_conv(
+            conv_conf,
+            MakeLayerNameInSubmodel(input_layer_names[0]),
+            self.operator_conf.conv_conf,
+            num_filters,
+            trans=True)
+        self.operator_conf.output_size = \
+            self.operator_conf.conv_conf.img_size * \
+            self.operator_conf.conv_conf.img_size_y * \
+            num_filters
+
+        config_assert(len(input_layer_names) == 2, "Conv is binary operator")
+
+    def calc_output_size(self, input_sizes):
+        return self.operator_conf.output_size
+
+
 # please refer to the comments in proto/ModelConfig.proto
 @config_class
 class Conv(Cfg):
...
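The key asymmetry between the two projections is which side of the geometry fixes output_size: ConvProjection uses the conv output (output_x * output_y * num_filters), while ConvTransProjection uses the configured image (img_size * img_size_y * num_filters), because a transposed convolution produces the image side. A toy check of the two computations with made-up ConvConfig values (hypothetical numbers, not from the commit):

    # Hypothetical ConvConfig values for illustration only.
    img_size, img_size_y = 16, 9   # configured image width / height
    output_x, output_y = 8, 5      # conv output width / height
    num_filters = 16

    conv_size = output_x * output_y * num_filters     # ConvProjection.output_size
    convt_size = img_size * img_size_y * num_filters  # ConvTransProjection.output_size
    assert conv_size == 640 and convt_size == 2304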
trainer_config_helpers/layers.py:
@@ -712,8 +712,10 @@ class MixedLayerType(LayerOutput):
         assert len(self.inputs) == 0
         return self
 
-    def __exit__(self, *args, **kwargs):
-        del args, kwargs  # unused parameter to suppress warning
+    def __exit__(self, exc_type, exc_value, tb):
+        if exc_type != None:
+            traceback.print_exception(exc_type, exc_value, tb)
+            sys.exit(1)
         assert len(self.inputs) != 0
         ml = MixedLayer(
             name=self.name,
...
@@ -3715,7 +3717,8 @@ def conv_operator(img,
                   padding=0,
                   filter_size_y=None,
                   stride_y=None,
-                  padding_y=None):
+                  padding_y=None,
+                  trans=False):
     """
     Different from img_conv_layer, conv_op is an Operator, which can be used
     in mixed_layer. And conv_op takes two inputs to perform convolution.
...
@@ -3771,7 +3774,9 @@
     if filter.size is not None:
         filter.size = filter_size * filter_size_y * num_filters * num_channels
 
-    op = ConvOperator(
+    opCls = ConvTransOperator if trans else ConvOperator
+
+    op = opCls(
         input_layer_names=[img.name, filter.name],
         num_filters=num_filters,
         conv_conf=Conv(
...
@@ -3783,6 +3788,7 @@
             padding_y=padding_y,
             stride_y=stride_y,
             groups=1))
+
     op.origin = [img, filter]
     return op
...
@@ -3798,7 +3804,8 @@ def conv_projection(input,
                     stride_y=None,
                     padding_y=None,
                     groups=1,
-                    param_attr=None):
+                    param_attr=None,
+                    trans=False):
     """
     Different from img_conv_layer and conv_op, conv_projection is an Projection,
     which can be used in mixed_layer and conat_layer. It use cudnn to implement
...
@@ -3837,6 +3844,8 @@
     :type groups: int
     :param param_attr: Convolution param attribute. None means default attribute
     :type param_attr: ParameterAttribute
+    :param trans: whether it is convTrans or conv
+    :type trans: boolean
     :return: A DotMulProjection Object.
     :rtype: DotMulProjection
     """
...
@@ -3873,7 +3882,9 @@
         param_attr.attr["initial_strategy"] = 0
         param_attr.attr["initial_smart"] = False
 
-    proj = ConvProjection(
+    projCls = ConvTransProjection if trans else ConvProjection
+
+    proj = projCls(
         input_layer_name=input.name,
         num_filters=num_filters,
         conv_conf=Conv(
...
projections.py (test config):
@@ -34,11 +34,31 @@ flt = data_layer(name='filter', size=3 * 3 * 1 * 64)
 with mixed_layer() as m7:
     m7 += conv_operator(
         img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)
+    m7 += conv_projection(img, filter_size=3, num_filters=64, num_channels=1)
+
+with mixed_layer() as m8:
+    m8 += conv_operator(
+        img=img,
+        filter=flt,
+        num_filters=64,
+        num_channels=1,
+        filter_size=3,
+        stride=2,
+        padding=1,
+        trans=True)
+    m8 += conv_projection(
+        img,
+        filter_size=3,
+        num_filters=64,
+        num_channels=1,
+        stride=2,
+        padding=1,
+        trans=True)
 
 end = mixed_layer(
     input=[
         full_matrix_projection(input=m5),
-        trans_full_matrix_projection(input=m6), full_matrix_projection(input=m7)
+        trans_full_matrix_projection(input=m6),
+        full_matrix_projection(input=m7), full_matrix_projection(input=m8)
     ],
     size=100,
     layer_attr=ExtraAttr(
...