Commit 07c1ea25 authored by wangyang59

Python interface for convTransProjection and convTransOperator

Parent 6b7f6474
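As a quick orientation, a minimal sketch of the Python interface this commit adds, mirroring the projections test config in the last hunk (the img layer size here is illustrative; the filter layer size matches the test config):

from paddle.trainer_config_helpers import *

img = data_layer(name='image', size=16 * 16)
flt = data_layer(name='filter', size=3 * 3 * 1 * 64)

with mixed_layer() as m:
    # Transposed convolution via the new trans=True flag.
    m += conv_operator(
        img=img,
        filter=flt,
        num_filters=64,
        num_channels=1,
        filter_size=3,
        stride=2,
        padding=1,
        trans=True)
    m += conv_projection(
        img,
        filter_size=3,
        num_filters=64,
        num_channels=1,
        stride=2,
        padding=1,
        trans=True)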
......@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Operator.h"
#include "paddle/math/MathUtils.h"
......@@ -44,10 +45,8 @@ public:
hl_destroy_filter_descriptor(filterDesc_);
hl_destroy_convolution_descriptor(convDesc_);
}
virtual void forward();
virtual void backward();
private:
protected:
/**
* Get convolution parameters from layer config and
* initialize member variables.
......
......@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ConvBaseOperator.h"
#include "paddle/math/MathUtils.h"
......@@ -35,8 +36,8 @@ public:
* Free workspace in device and destroy cudnn tensor descriptor.
*/
virtual ~ConvOperator() {}
virtual void forward();
virtual void backward();
void forward() override;
void backward() override;
};
} // namespace paddle
......@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ConvBaseOperator.h"
#include "paddle/math/MathUtils.h"
......@@ -35,8 +36,8 @@ public:
* Free workspace in device and destroy cudnn tensor descriptor.
*/
virtual ~ConvTransOperator() {}
virtual void forward();
virtual void backward();
void forward() override;
void backward() override;
};
} // namespace paddle
......@@ -1503,16 +1503,20 @@ TEST(Layer, BatchNormalizationLayer) {
#endif
}
TEST(Operator, conv) {
void testConvOperator(bool isDeconv) {
TestConfig config;
const int NUM_FILTERS = 16;
const int FILTER_SIZE = 2;
const int FILTER_SIZE_Y = 3;
const int CHANNELS = 3;
const int IMAGE_SIZE = 16;
const int IMAGE_SIZE_Y = 8;
const int IMAGE_SIZE_Y = 9;
OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
operatorConf.set_type("conv");
if (isDeconv) {
operatorConf.set_type("convt");
} else {
operatorConf.set_type("conv");
}
ConvConfig* conv = operatorConf.mutable_conv_conf();
operatorConf.set_num_filters(NUM_FILTERS);
conv->set_filter_size(FILTER_SIZE);
......@@ -1523,7 +1527,6 @@ TEST(Operator, conv) {
conv->set_stride(2);
conv->set_stride_y(2);
conv->set_groups(1);
conv->set_filter_channels(conv->channels() / conv->groups());
conv->set_img_size(IMAGE_SIZE);
conv->set_img_size_y(IMAGE_SIZE_Y);
conv->set_output_x(outputSize(conv->img_size(),
......@@ -1536,11 +1539,22 @@ TEST(Operator, conv) {
conv->padding_y(),
conv->stride_y(),
/* caffeMode */ true));
config.layerConfig.set_size(conv->output_x() * conv->output_y() *
NUM_FILTERS);
config.inputDefs.push_back(
{INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
if (isDeconv) {
conv->set_filter_channels(NUM_FILTERS / conv->groups());
config.inputDefs.push_back({INPUT_DATA,
"layer_0",
conv->output_x() * conv->output_y() * CHANNELS,
0});
config.layerConfig.set_size(IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS);
} else {
conv->set_filter_channels(conv->channels() / conv->groups());
config.inputDefs.push_back(
{INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
config.layerConfig.set_size(conv->output_x() * conv->output_y() *
NUM_FILTERS);
}
config.inputDefs.push_back(
{INPUT_DATA,
"layer_1",
......@@ -1552,6 +1566,11 @@ TEST(Operator, conv) {
testOperatorGrad(config, operatorConf, 100, /*useGpu*/ true, false);
}
TEST(Operator, conv) {
testConvOperator(/*isDeconv*/ true);
testConvOperator(/*isDeconv*/ false);
}
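For reference, a small sketch of the shape bookkeeping the refactored test relies on, assuming PaddlePaddle's caffeMode output-size formula (the padding value below is illustrative, since the padding lines are elided in the hunk):

def output_size(image_size, filter_size, padding, stride):
    # caffeMode: floor((image - filter + 2 * padding) / stride) + 1
    return (image_size - filter_size + 2 * padding) // stride + 1

IMAGE_SIZE, IMAGE_SIZE_Y, CHANNELS, NUM_FILTERS = 16, 9, 3, 16
out_x = output_size(IMAGE_SIZE, 2, 0, 2)    # FILTER_SIZE=2, stride=2
out_y = output_size(IMAGE_SIZE_Y, 3, 0, 2)  # FILTER_SIZE_Y=3, stride_y=2

# Forward conv: the input is the image, the layer output is conv-sized.
conv_input_size = IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS
conv_layer_size = out_x * out_y * NUM_FILTERS
# Deconv (isDeconv=true) swaps the roles, exactly as in the branch above.
deconv_input_size = out_x * out_y * CHANNELS
deconv_layer_size = IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS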
TEST(Layer, FeatureMapExpandLayer) {
TestConfig config;
config.layerConfig.set_type("featmap_expand");
......
......@@ -686,25 +686,17 @@ class ContextProjection(Projection):
@config_class
class ConvProjection(Projection):
type = 'conv'
class ConvBaseProjection(Projection):
def __init__(self,
input_layer_name,
num_filters=None,
conv_conf=None,
**xargs):
super(ConvProjection, self).__init__(input_layer_name, **xargs)
super(ConvBaseProjection, self).__init__(input_layer_name, **xargs)
if num_filters is not None:
self.proj_conf.num_filters = num_filters
parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
num_filters)
self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
self.proj_conf.conv_conf.output_y * \
num_filters
def calc_output_size(self, input_layer_config):
return self.proj_conf.output_size
......@@ -723,6 +715,46 @@ class ConvProjection(Projection):
return None
@config_class
class ConvProjection(ConvBaseProjection):
type = 'conv'
def __init__(self,
input_layer_name,
num_filters=None,
conv_conf=None,
**xargs):
super(ConvProjection, self).__init__(input_layer_name, **xargs)
parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
num_filters)
self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
self.proj_conf.conv_conf.output_y * \
num_filters
@config_class
class ConvTransProjection(ConvBaseProjection):
type = 'convt'
def __init__(self,
input_layer_name,
num_filters=None,
conv_conf=None,
**xargs):
super(ConvTransProjection, self).__init__(input_layer_name, **xargs)
parse_conv(
conv_conf,
input_layer_name,
self.proj_conf.conv_conf,
num_filters,
trans=True)
self.proj_conf.output_size = self.proj_conf.conv_conf.img_size_y * \
self.proj_conf.conv_conf.img_size * \
num_filters
# Define a operator for mixed layer
@config_class
class Operator(Cfg):
......@@ -789,6 +821,36 @@ class ConvOperator(Operator):
return self.operator_conf.output_size
@config_class
class ConvTransOperator(Operator):
type = 'convt'
def __init__(self,
input_layer_names,
num_filters=None,
conv_conf=None,
**xargs):
super(ConvTransOperator, self).__init__(input_layer_names, **xargs)
if num_filters is not None:
self.operator_conf.num_filters = num_filters
parse_conv(
conv_conf,
MakeLayerNameInSubmodel(input_layer_names[0]),
self.operator_conf.conv_conf,
num_filters,
trans=True)
self.operator_conf.output_size = \
self.operator_conf.conv_conf.img_size * \
self.operator_conf.conv_conf.img_size_y * \
num_filters
config_assert(len(input_layer_names) == 2, "ConvTransOperator is a binary operator")
def calc_output_size(self, input_sizes):
return self.operator_conf.output_size
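The essential difference between the regular and transposed config classes above is how output_size is derived from the parsed conv_conf; schematically (a sketch, using the field names from the diff):

def projected_output_size(conv_conf, num_filters, trans):
    if trans:
        # ConvTransProjection / ConvTransOperator: the image side is the output.
        return conv_conf.img_size * conv_conf.img_size_y * num_filters
    # ConvProjection / ConvOperator: the convolved side is the output.
    return conv_conf.output_x * conv_conf.output_y * num_filters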
# please refer to the comments in proto/ModelConfig.proto
@config_class
class Conv(Cfg):
......
......@@ -712,8 +712,10 @@ class MixedLayerType(LayerOutput):
assert len(self.inputs) == 0
return self
def __exit__(self, *args, **kwargs):
del args, kwargs # unused parameter to suppress warning
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
sys.exit(1)
assert len(self.inputs) != 0
ml = MixedLayer(
name=self.name,
......@@ -3715,7 +3717,8 @@ def conv_operator(img,
padding=0,
filter_size_y=None,
stride_y=None,
padding_y=None):
padding_y=None,
trans=False):
"""
Different from img_conv_layer, conv_op is an Operator that can be used in
mixed_layer, and it takes two inputs to perform convolution.
......@@ -3771,7 +3774,9 @@ def conv_operator(img,
if filter.size is not None:
filter.size = filter_size * filter_size_y * num_filters * num_channels
op = ConvOperator(
opCls = ConvTransOperator if trans else ConvOperator
op = opCls(
input_layer_names=[img.name, filter.name],
num_filters=num_filters,
conv_conf=Conv(
......@@ -3783,6 +3788,7 @@ def conv_operator(img,
padding_y=padding_y,
stride_y=stride_y,
groups=1))
op.origin = [img, filter]
return op
......@@ -3798,7 +3804,8 @@ def conv_projection(input,
stride_y=None,
padding_y=None,
groups=1,
param_attr=None):
param_attr=None,
trans=False):
"""
Different from img_conv_layer and conv_op, conv_projection is a Projection,
which can be used in mixed_layer and concat_layer. It uses cudnn to implement
......@@ -3837,6 +3844,8 @@ def conv_projection(input,
:type groups: int
:param param_attr: Convolution param attribute. None means default attribute
:type param_attr: ParameterAttribute
:param trans: whether it is a transposed convolution (convt) or a regular convolution
:type trans: bool
:return: A ConvProjection or ConvTransProjection object.
:rtype: ConvProjection or ConvTransProjection
"""
......@@ -3873,7 +3882,9 @@ def conv_projection(input,
param_attr.attr["initial_strategy"] = 0
param_attr.attr["initial_smart"] = False
proj = ConvProjection(
projCls = ConvTransProjection if trans else ConvProjection
proj = projCls(
input_layer_name=input.name,
num_filters=num_filters,
conv_conf=Conv(
......
......@@ -34,11 +34,31 @@ flt = data_layer(name='filter', size=3 * 3 * 1 * 64)
with mixed_layer() as m7:
m7 += conv_operator(
img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)
m7 += conv_projection(img, filter_size=3, num_filters=64, num_channels=1)
with mixed_layer() as m8:
m8 += conv_operator(
img=img,
filter=flt,
num_filters=64,
num_channels=1,
filter_size=3,
stride=2,
padding=1,
trans=True)
m8 += conv_projection(
img,
filter_size=3,
num_filters=64,
num_channels=1,
stride=2,
padding=1,
trans=True)
end = mixed_layer(
input=[
full_matrix_projection(input=m5),
trans_full_matrix_projection(input=m6), full_matrix_projection(input=m7)
trans_full_matrix_projection(input=m6),
full_matrix_projection(input=m7), full_matrix_projection(input=m8)
],
size=100,
layer_attr=ExtraAttr(
......