Commit 8b5431d5 authored by dangqingqing

padding operation

Parent 495649af
@@ -17,6 +17,7 @@ if(WITH_TESTING)
# file(GLOB test_files . *OpTest.cpp)
# add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files})
add_simple_unittest(CrossMapNormalOpTest)
add_simple_unittest(PadOpTest)
add_unittest(ContextProjectionOpTest
ContextProjectionOpTest.cpp
../gserver/tests/TestUtil.cpp)
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PadOp.h"
#include "paddle/math/Vector.h"
namespace paddle {
template <>
void Pad<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1) {
int outC = inC + padc0 + padc1;
int outH = inH + padh0 + padh1;
int outW = inW + padw0 + padw1;
for (int i = 0; i < num; i++) {
for (int c = 0; c < inC; c++) {
for (int h = 0; h < inH; h++) {
int inoff = ((i * inC + c) * inH + h) * inW;
int outoff = ((i * outC + c + padc0) * outH + h + padh0) * outW + padw0;
memcpy(outputs + outoff, inputs + inoff, inW * sizeof(real));
}
}
}
}
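// A worked example of the offset arithmetic above (illustration only): with
// num = 1, inC = 1, inH = inW = 2 and padh0 = padh1 = padw0 = padw1 = 1
// (no channel padding), outH = outW = 4, so the two input rows are copied to
//   h = 0: outoff = ((0 + 0) * 4 + 0 + 1) * 4 + 1 = 5  -> outputs[5..6]
//   h = 1: outoff = ((0 + 0) * 4 + 1 + 1) * 4 + 1 = 9  -> outputs[9..10]
// Pad only writes these interior rows; the caller is assumed to provide a
// zero-initialized output buffer.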
template <>
void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
const real* outGrad,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1) {
int outC = inC + padc0 + padc1;
int outH = inH + padh0 + padh1;
int outW = inW + padw0 + padw1;
for (int i = 0; i < num; i++) {
for (int c = 0; c < inC; c++) {
for (int h = 0; h < inH; h++) {
int inoff = ((i * inC + c) * inH + h) * inW;
int outoff = ((i * outC + c + padc0) * outH + h + padh0) * outW + padw0;
CpuVector inG = CpuVector(inW, inGrad + inoff);
CpuVector outG = CpuVector(inW, const_cast<real*>(outGrad + outoff));
inG += outG;
}
}
}
}
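// PadGrad mirrors Pad: each interior row of inGrad accumulates the matching
// row of the padded outGrad through the vectorized inG += outG, while
// gradients at the padding positions are simply dropped. Accumulation
// (rather than assignment) is assumed so the op can add into an existing
// input gradient.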
/**
* \param inputs[0] input value.
* \param outputs[0] output value.
*/
template <DeviceType Device>
class PadFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {
padc0_ = config.get<int>("padc0");
padc1_ = config.get<int>("padc1");
padh0_ = config.get<int>("padh0");
padh1_ = config.get<int>("padh1");
padw0_ = config.get<int>("padw0");
padw1_ = config.get<int>("padw1");
}
void calc(const Arguments& inputs,
const Arguments& outputs,
const Arguments& inouts) override {
CHECK_EQ(1, inputs.size());
CHECK_EQ(1, outputs.size());
CHECK_EQ(0, inouts.size());
size_t num = inputs[0].dims_[0];
size_t inC = inputs[0].dims_[1];
size_t inH = inputs[0].dims_[2];
size_t inW = inputs[0].dims_[3];
Pad<Device>(outputs[0].getData(),
inputs[0].getData(),
num,
inC,
inH,
inW,
padc0_,
padc1_,
padh0_,
padh1_,
padw0_,
padw1_);
}
private:
int padc0_;
int padc1_;
int padh0_;
int padh1_;
int padw0_;
int padw1_;
};
/**
* \param inputs[0] output grad.
* \param inouts[0] input grad (accumulated in place).
*/
template <DeviceType Device>
class PadGradFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {
padc0_ = config.get<int>("padc0");
padc1_ = config.get<int>("padc1");
padh0_ = config.get<int>("padh0");
padh1_ = config.get<int>("padh1");
padw0_ = config.get<int>("padw0");
padw1_ = config.get<int>("padw1");
}
void calc(const Arguments& inputs,
const Arguments& outputs,
const Arguments& inouts) override {
CHECK_EQ(1, inputs.size());
CHECK_EQ(0, outputs.size());
CHECK_EQ(1, inouts.size());
size_t n = inouts[0].dims_[0];
size_t inC = inouts[0].dims_[1];
size_t inH = inouts[0].dims_[2];
size_t inW = inouts[0].dims_[3];
PadGrad<Device>(inouts[0].getData(),
inputs[0].getData(),
n,
inC,
inH,
inW,
padc0_,
padc1_,
padh0_,
padh1_,
padw0_,
padw1_);
}
private:
int padc0_;
int padc1_;
int padh0_;
int padh1_;
int padw0_;
int padw1_;
};
REGISTER_TYPED_FUNC(Pad, CPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(Pad, GPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc);
#endif
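// Usage sketch (hypothetical call sequence; PadLayer below goes through
// Layer::createFunction rather than touching the registrar directly):
//   auto pad = FunctionBase::funcRegistrar_.createByType("Pad-CPU");
//   pad->init(FuncConfig().set("padc0", 2).set("padc1", 3)
//                         .set("padh0", 1).set("padh1", 2)
//                         .set("padw0", 3).set("padw1", 2));
//   pad->calc({Tensor(inData, inDims)}, {Tensor(outData, outDims)}, {});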
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Function.h"
namespace paddle {
/**
* \brief This function pads zeros to the input according to the specified dimensions.
* The data structure of image data is NCHW.
*
* \param[out] outputs save results.
* \param[in] inputs input data.
* \param[in] num batch size of input data.
* \param[in] inC channel number of input data.
* \param[in] inH height of input data.
* \param[in] inW width of input data.
* \param[in] padc0 how many values to add before the data in dimension of
* channel.
* \param[in] padc1 how many values to add after the data in dimension of
* channel.
* \param[in] padh0 how many values to add before the data in dimension of
* height.
* \param[in] padh1 how many values to add after the data in dimension of
* height.
* \param[in] padw0 how many values to add before the data in dimension of
* width.
* \param[in] padw1 how many values to add after the data in dimension of
* width.
*
*/
template <DeviceType Device>
void Pad(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1);
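// Shape example (illustration only): an input of shape (num, inC, inH, inW) =
// (2, 3, 4, 5) with padc = (1, 1), padh = (0, 2), padw = (3, 0) yields an
// output of shape (2, 3 + 2, 4 + 2, 5 + 3) = (2, 5, 6, 8).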
/**
* \brief Padding operation backward.
* The data structure of image data is NCHW.
*
* \param[out] inGrad gradients of previous layer.
* \param[in] outGrad output gradients.
* \param[in] num batch size of input data.
* \param[in] inC channel number of input data.
* \param[in] inH height of input data.
* \param[in] inW width of input data.
* \param[in] padc0 how many values to add before the data in dimension of
* channel.
* \param[in] padc1 how many values to add after the data in dimension of
* channel.
* \param[in] padh0 how many values to add before the data in dimension of
* height.
* \param[in] padh1 how many values to add after the data in dimension of
* height.
* \param[in] padw0 how many values to add before the data in dimension of
* width.
* \param[in] padw1 how many values to add after the data in dimension of
* width.
*
*/
template <DeviceType Device>
void PadGrad(real* inGrad,
const real* outGrad,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1);
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "PadOp.h"
namespace paddle {
__global__ void KePad(real* outputs, const real* inputs,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
outputs[off] = inputs[idx];
}
}
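// KePad launches one thread per *input* element: idx is decomposed into
// (n, c, h, w) in NCHW order and written to the shifted offset in the padded
// output. For example, num = 5, inC = 3, inH = 7, inW = 9 gives
// nthreads = 945, which the wrapper below covers with a single
// 1024-thread block.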
template <>
void Pad<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
int outC = inC + padc0 + padc1;
int outH = inH + padh0 + padh1;
int outW = inW + padw0 + padw1;
KePad<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
(outputs, inputs, inC, inH, inW, padc0, padh0, padw0,
outC, outH, outW, nth);
CHECK_SYNC("Pad");
}
__global__ void KePadDiff(real* inGrad, const real* outGrad,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
inGrad[idx] += outGrad[off];
}
}
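// KePadDiff inverts the mapping: each thread gathers the one outGrad element
// that corresponds to its inGrad element and accumulates it with +=. Since
// the input-to-output mapping is one-to-one, no atomics are needed.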
template <>
void PadGrad<DEVICE_TYPE_GPU>(real* inGrad,
const real* outGrad,
const int num,
const int inC,
const int inH,
const int inW,
const int padc0,
const int padc1,
const int padh0,
const int padh1,
const int padw0,
const int padw1) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
int outC = inC + padc0 + padc1;
int outH = inH + padh0 + padh1;
int outW = inW + padw0 + padw1;
KePadDiff <<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
(inGrad, outGrad, inC, inH, inW, padc0, padh0, padw0,
outC, outH, outW, nth);
CHECK_SYNC("PadGrad");
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "FunctionTest.h"
namespace paddle {
TEST(Pad, real) {
for (size_t numSamples : {5, 32}) {
for (size_t channels : {1, 5, 32}) {
for (size_t imgSizeH : {5, 33, 100}) {
for (size_t imgSizeW : {5, 32, 96}) {
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
FunctionCompare compare("Pad",
FuncConfig()
.set("padc0", 2)
.set("padc1", 3)
.set("padh0", 1)
.set("padh1", 2)
.set("padw0", 3)
.set("padw1", 2));
Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
compare.cmpWithArg(
{Tensor(nullptr, inDims)}, {Tensor(nullptr, outDims)}, {});
}
}
}
}
}
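// FunctionCompare is assumed to run the CPU and GPU registrations of "Pad"
// on identically initialized random tensors and assert that both devices
// produce the same output, which is why nullptr-backed Tensors carrying only
// the dims are sufficient here.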
// TEST(PadGrad, real) {
// for (size_t numSamples : {5, 32}) {
// for (size_t channels : {1, 5, 32}) {
// for (size_t imgSizeH : {5, 33, 100}) {
// for (size_t imgSizeW : {5, 32, 96}) {
// VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
// << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
//
// FunctionCompare compare("PadGrad",
// FuncConfig()
// .set("padc0", 2).set("padc1", 3)
// .set("padh0", 1).set("padh1", 2)
// .set("padw0", 3).set("padw1", 2));
// Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
// Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
// compare.cmpWithArg({Tensor(nullptr, inDims)},
// {Tensor(nullptr, outDims)},
// {});
// }
// }
// }
// }
// }
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PadLayer.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(pad, PadLayer);
bool PadLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
auto& pad_conf = config_.inputs(0).pad_conf();
auto& img_conf = pad_conf.image_conf();
CHECK_EQ(config_.inputs_size(), 1);
inDims_.push_back(0);
inDims_.push_back(img_conf.channels());
inDims_.push_back(img_conf.has_img_size_y() ? img_conf.img_size_y()
: img_conf.img_size());
inDims_.push_back(img_conf.img_size());
CHECK_EQ(2UL, pad_conf.pad_c_size());
CHECK_EQ(2UL, pad_conf.pad_h_size());
CHECK_EQ(2UL, pad_conf.pad_w_size());
padc_.push_back(pad_conf.pad_c(0));
padc_.push_back(pad_conf.pad_c(1));
padh_.push_back(pad_conf.pad_h(0));
padh_.push_back(pad_conf.pad_h(1));
padw_.push_back(pad_conf.pad_w(0));
padw_.push_back(pad_conf.pad_w(1));
outDims_.resize(4);
setOutDims(0);
createFunction(forward_,
"Pad",
FuncConfig()
.set("padc0", padc_[0])
.set("padc1", padc_[1])
.set("padh0", padh_[0])
.set("padh1", padh_[1])
.set("padw0", padw_[0])
.set("padw1", padw_[1]));
createFunction(backward_,
"PadGrad",
FuncConfig()
.set("padc0", padc_[0])
.set("padc1", padc_[1])
.set("padh0", padh_[0])
.set("padh1", padh_[1])
.set("padw0", padw_[0])
.set("padw1", padw_[1]));
return true;
}
void PadLayer::setOutDims(int batchSize) {
outDims_[0] = batchSize;
outDims_[1] = inDims_[1] + padc_[0] + padc_[1];
outDims_[2] = inDims_[2] + padh_[0] + padh_[1];
outDims_[3] = inDims_[3] + padw_[0] + padw_[1];
}
void PadLayer::setTensorDim(int batchSize) {
CHECK_EQ(inputLayers_.size(), 1UL);
inDims_[0] = batchSize;
int h = inputLayers_[0]->getOutput().getFrameHeight();
if (h != 0) inDims_[2] = h;
int w = inputLayers_[0]->getOutput().getFrameWidth();
if (w != 0) inDims_[3] = w;
setOutDims(batchSize);
}
void PadLayer::forward(PassType passType) {
Layer::forward(passType);
MatrixPtr input = inputLayers_[0]->getOutputValue();
size_t batchSize = input->getHeight();
setTensorDim(batchSize);
int size = outDims_[1] * outDims_[2] * outDims_[3];
resetOutput(batchSize, size);
MatrixPtr outV = getOutputValue();
REGISTER_TIMER_INFO("PadForward", getName().c_str());
forward_[0]->calc({Tensor(input->getData(), inDims_)},
{Tensor(outV->getData(), outDims_)},
{});
}
void PadLayer::backward(const UpdateCallback& callback) {
(void)callback;
MatrixPtr preGrad = inputLayers_[0]->getOutputGrad();
if (NULL == preGrad) {
return;
}
MatrixPtr outGrad = getOutputGrad();
REGISTER_TIMER_INFO("PadBackward", getName().c_str());
backward_[0]->calc({Tensor(outGrad->getData(), outDims_)},
{},
{Tensor(preGrad->getData(), inDims_)});
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Layer.h"
namespace paddle {
/**
* @brief Pads the input data in the channel, height and width
* dimensions with zeros. The data format is NCHW.
*/
class PadLayer : public Layer {
public:
explicit PadLayer(const LayerConfig& config) : Layer(config) {}
~PadLayer() {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
void forward(PassType passType);
void backward(const UpdateCallback& callback = nullptr);
protected:
void setOutDims(int batchSize);
void setTensorDim(int batchSize);
std::vector<int> padc_;
std::vector<int> padh_;
std::vector<int> padw_;
Dims inDims_;
Dims outDims_;
};
} // namespace paddle
@@ -255,6 +255,13 @@ message PriorBoxConfig {
repeated float variance = 4;
}
message PadConfig {
required ImageConfig image_conf = 1;
repeated uint32 pad_c = 2;
repeated uint32 pad_h = 3;
repeated uint32 pad_w = 4;
}
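// Example: pad_c: [2, 3], pad_h: [1, 2], pad_w: [3, 1] turns an NCHW input
// of shape (N, C, H, W) into (N, C + 5, H + 3, W + 4).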
message LayerInputConfig {
required string input_layer_name = 1;
optional string input_parameter_name = 2;
@@ -271,6 +278,7 @@ message LayerInputConfig {
optional MaxOutConfig maxout_conf = 11;
optional SppConfig spp_conf = 12;
optional PriorBoxConfig priorbox_conf = 13;
optional PadConfig pad_conf = 14;
}
message LayerConfig {
......
@@ -493,6 +493,7 @@ class Input(Cfg):
block_expand=None,
maxout=None,
spp=None,
pad=None,
format=None,
nnz=None,
is_static=None,
@@ -844,6 +845,12 @@ class SpatialPyramidPool(Cfg):
self.add_keys(locals())
@config_class
class Pad(Cfg):
def __init__(self, channels, pad_c, pad_h, pad_w):
self.add_keys(locals())
@config_class
class Norm(Cfg):
def __init__(self,
@@ -1842,6 +1849,25 @@ class SpatialPyramidPoolLayer(LayerBase):
self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels)
@config_layer('pad')
class PadLayer(LayerBase):
def __init__(self, name, inputs, **xargs):
super(PadLayer, self).__init__(name, 'pad', 0, inputs=inputs, **xargs)
pad = self.inputs[0].pad
self.config.inputs[0].pad_conf.pad_c.extend(pad.pad_c)
self.config.inputs[0].pad_conf.pad_h.extend(pad.pad_h)
self.config.inputs[0].pad_conf.pad_w.extend(pad.pad_w)
input_layer = self.get_input_layer(0)
image_conf = self.config.inputs[0].pad_conf.image_conf
parse_image(pad, input_layer.name, image_conf)
out_ch = pad.channels + pad.pad_c[0] + pad.pad_c[1]
out_h = image_conf.img_size_y + pad.pad_h[0] + pad.pad_h[1]
out_w = image_conf.img_size + pad.pad_w[0] + pad.pad_w[1]
self.set_cnn_layer(name, out_h, out_w, out_ch)
self.config.size = out_ch * out_h * out_w
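# Example (hypothetical numbers): channels=8, img_size_y=24, img_size=21
# with pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1] yields out_ch=13, out_h=27,
# out_w=25, and size = 13 * 27 * 25 = 8775.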
@config_layer('batch_norm')
class BatchNormLayer(LayerBase):
layer_type = 'batch_norm'
......
@@ -170,6 +170,7 @@ class LayerType(object):
BLOCK_EXPAND = "blockexpand"
MAXOUT = "maxout"
SPP_LAYER = "spp"
PAD_LAYER = "pad"
PRINT_LAYER = "print"
PRIORBOX_LAYER = "priorbox"
@@ -3488,9 +3489,6 @@ def conv_projection(input,
groups=1,
param_attr=None):
"""
ConvProjection with a layer as input.
It performs element-wise multiplication with weight.
Different from img_conv_layer and conv_op, conv_projection is a Projection,
which can be used in mixed_layer and concat_layer. It uses cudnn to implement
conv and only supports GPU mode.
@@ -3499,7 +3497,7 @@ def conv_projection(input,
.. code-block:: python
proj = conv_projection(img=input1,
proj = conv_projection(input=input1,
filter_size=3,
num_filters=64,
num_channels=64)
......@@ -3582,6 +3580,84 @@ def conv_projection(input,
return proj
@wrap_name_default("pad")
@layer_support()
def pad_layer(input,
pad_c=None,
pad_h=None,
pad_w=None,
name=None,
layer_attr=None):
"""
This operation pads zeros to the input data according to pad_c, pad_h
and pad_w, which specify the padding size in the channel, height and
width dimensions respectively. The input data shape is NCHW.
For example, pad_c=[2,3] means padding 2 zeros before the
input data and 3 zeros after the input data in the channel dimension.
pad_h and pad_w likewise pad zeros in the height and width dimensions.
.. code-block:: python
pad = pad_layer(input=ipt,
pad_c=[4,4],
pad_h=[0,0],
pad_w=[2,2])
:param input: layer's input.
:type input: LayerOutput
:param pad_c: padding size in channel dimension.
:type pad_c: list|None
:param pad_h: padding size in height dimension.
:type pad_h: list|None
:param pad_w: padding size in width dimension.
:type pad_w: list|None
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:param name: layer name.
:type name: basestring
:return: LayerOutput object.
:rtype: LayerOutput
"""
if pad_c is not None:
assert isinstance(pad_c, collections.Sequence) and len(pad_c) == 2
else:
pad_c = [0, 0]
if pad_h is not None:
assert isinstance(pad_h, collections.Sequence) and len(pad_h) == 2
else:
pad_h = [0, 0]
if pad_w is not None:
assert isinstance(pad_w, collections.Sequence) and len(pad_w) == 2
else:
pad_w = [0, 0]
assert input.num_filters is not None
in_ch = input.num_filters
out_ch = in_ch + pad_c[0] + pad_c[1]
l = Layer(
name=name,
type=LayerType.PAD_LAYER,
inputs=Input(
input.name,
pad=Pad(
channels=in_ch,
pad_c=pad_c,
pad_h=pad_h,
pad_w=pad_w, )),
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name,
layer_type=LayerType.PAD_LAYER,
parents=[input],
num_filters=out_ch,
size=l.config.size)
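# For the docstring example above (pad_c=[4, 4], pad_h=[0, 0], pad_w=[2, 2]),
# an input of shape (N, C, H, W) comes out as (N, C + 8, H, W + 4).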
@wrap_name_default()
@layer_support()
def conv_shift_layer(a, b, name=None, layer_attr=None):
......
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
data = data_layer(name='data', size=2016, height=48, width=42)
conv = img_conv_layer(
input=data,
filter_size=3,
num_channels=1,
num_filters=16,
padding=1,
act=LinearActivation(),
bias_attr=True)
pool = img_pool_layer(
input=conv, num_channels=8, pool_size=2, stride=2, pool_type=MaxPooling())
pad = pad_layer(input=pool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
outputs(pad)
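# Expected shapes (assuming the pool output is treated as 8 channels of
# 24 x 21): pad produces (8 + 5) x (24 + 3) x (21 + 4) = 13 x 27 x 25,
# i.e. size 8775.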