提交 6da00da7 编写于 作者: X xzl

code format check

上级 7dc584f5
......@@ -378,14 +378,15 @@ extern void hl_maxout_backward(real* inGrad,
 * @param[in] outputW the output width.
* @param[out] outputData output data.
*/
extern void hl_upsample_forward(real *inputData, real *maskData,
extern void hl_upsample_forward(real* inputData,
real* maskData,
size_t batchSize,
size_t imgSizeH,
size_t imgSizeW,
size_t channels,
size_t outputH,
size_t outputW,
real *outputData);
real* outputData);
/**
* @brief Upsample backward.
......@@ -399,13 +400,14 @@ extern void hl_upsample_forward(real *inputData, real *maskData,
 * @param[in] outputW the output width.
* @param[out] inputGradData the input grad data.
*/
extern void hl_upsample_backward(real *outputGradData, real *maskData,
extern void hl_upsample_backward(real* outputGradData,
real* maskData,
size_t batchSize,
size_t imgSizeH,
size_t imgSizeW,
size_t channels,
size_t outputH,
size_t outputW,
real *inputGradData);
real* inputGradData);
#endif // HL_CNN_H_
......@@ -222,22 +222,24 @@ inline void hl_maxout_backward(real* inGrad,
size_t featLen,
size_t group) {}
inline void hl_upsample_forward(real *inputData, real *maskData,
inline void hl_upsample_forward(real* inputData,
real* maskData,
size_t batchSize,
size_t imgSizeH,
size_t imgSizeW,
size_t channels,
size_t outputH,
size_t outputW,
real *outputData) {}
real* outputData) {}
inline void hl_upsample_backward(real *outputGradData, real *maskData,
inline void hl_upsample_backward(real* outputGradData,
real* maskData,
size_t batchSize,
size_t imgSizeH,
size_t imgSizeW,
size_t channels,
size_t outputH,
size_t outputW,
real *inputGradData) {}
real* inputGradData) {}
#endif // HL_CNN_STUB_H_
......@@ -30,6 +30,7 @@ size_t UpsampleLayer::getOutputSize() {
bool UpsampleLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
CHECK_EQ(inputLayers_.size(), 2U);
CHECK_EQ(config_.inputs_size(), 2);
const auto& conf = config_.inputs(0).upsample_conf();
......
......@@ -32,7 +32,6 @@ namespace paddle {
class UpsampleLayer : public Layer {
public:
explicit UpsampleLayer(const LayerConfig& config) : Layer(config) {}
~UpsampleLayer() {}
bool init(const LayerMap& layerMap,
......
......@@ -1033,9 +1033,9 @@ void GpuMatrix::upsampleForward(Matrix& input,
CHECK(input.useGpu_ == true) << "Matrix type are not equal";
CHECK(mask.useGpu_ == true) << "Matrix type are not equal";
real *inputData = input.getData();
real *maskData = mask.getData();
real *outData = data_;
real* inputData = input.getData();
real* maskData = mask.getData();
real* outData = data_;
size_t batch = input.getHeight();
......@@ -1043,7 +1043,8 @@ void GpuMatrix::upsampleForward(Matrix& input,
CHECK(imgSizeH * imgSizeW * channels == mask.getWidth());
CHECK_EQ(batch, this->getHeight());
CHECK(width_ == outputH * outputW * channels);
hl_upsample_forward(inputData, maskData,
hl_upsample_forward(inputData,
maskData,
batch,
imgSizeH,
imgSizeW,
......@@ -1063,15 +1064,16 @@ void GpuMatrix::upsampleBackward(Matrix& outputGrad,
CHECK(outputGrad.useGpu_ == true) << "Matrix type are not equal";
CHECK(mask.useGpu_ == true) << "Matrix type are not equal";
real *outputGradData = outputGrad.getData();
real *maskData = mask.getData();
real *inputGradData = data_;
real* outputGradData = outputGrad.getData();
real* maskData = mask.getData();
real* inputGradData = data_;
size_t batch = outputGrad.getHeight();
CHECK(imgSizeH * imgSizeW == this->getWidth()/channels);
CHECK(imgSizeH * imgSizeW == this->getWidth() / channels);
CHECK_EQ(batch, this->getHeight());
CHECK_EQ(channels * outputH * outputW, outputGrad.getWidth());
hl_upsample_backward(outputGradData, maskData,
hl_upsample_backward(outputGradData,
maskData,
batch,
imgSizeH,
imgSizeW,
......@@ -2046,9 +2048,9 @@ void CpuMatrix::upsampleForward(Matrix& input,
size_t channels,
size_t outputH,
size_t outputW) {
real *inputData = input.getData();
real *maskData = mask.getData();
real *outData = data_;
real* inputData = input.getData();
real* maskData = mask.getData();
real* outData = data_;
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
size_t batch = input.getHeight();
......@@ -2061,8 +2063,7 @@ void CpuMatrix::upsampleForward(Matrix& input,
for (size_t i = 0; i < inLength; i++) {
size_t out_index = static_cast<int>(maskData[i]);
if (out_index >= outLength) {
LOG(FATAL) << "upsample index " << out_index
<< " out of range.";
LOG(FATAL) << "upsample index " << out_index << " out of range.";
}
outData[out_index] = inputData[i];
}
......@@ -2080,13 +2081,13 @@ void CpuMatrix::upsampleBackward(Matrix& outputGrad,
size_t channels,
size_t outputH,
size_t outputW) {
real *outputGradData = outputGrad.getData();
real *maskData = mask.getData();
real *inputGradData = data_;
real* outputGradData = outputGrad.getData();
real* maskData = mask.getData();
real* inputGradData = data_;
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
size_t batch = outputGrad.getHeight();
CHECK(inLength == this->getWidth()/channels);
CHECK(inLength == this->getWidth() / channels);
CHECK_EQ(batch, this->getHeight());
CHECK_EQ(channels * outLength, outputGrad.getWidth());
......@@ -2095,8 +2096,7 @@ void CpuMatrix::upsampleBackward(Matrix& outputGrad,
for (size_t i = 0; i < inLength; i++) {
size_t out_index = static_cast<int>(maskData[i]);
if (out_index >= outLength) {
LOG(FATAL) << "upsample index " << out_index
<< " out of range.";
LOG(FATAL) << "upsample index " << out_index << " out of range.";
}
inputGradData[i] = outputGradData[out_index];
}
......
......@@ -978,12 +978,14 @@ class Pad(Cfg):
def __init__(self, channels, pad_c, pad_h, pad_w):
self.add_keys(locals())
@config_class
class Upsample(Cfg):
def __init__(self, scale, scale_y, pad_out_x, pad_out_y, upsample_size,
upsample_size_y):
self.add_keys(locals())
@config_class
class Norm(Cfg):
def __init__(self,
......@@ -2393,6 +2395,7 @@ class SpatialPyramidPoolLayer(LayerBase):
output_x = (pow(4, spp_conf.pyramid_height) - 1) / (4 - 1)
self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels)
@config_layer('upsample')
class UpsampleLayer(LayerBase):
def __init__(self, name, inputs, **xargs):
......@@ -2410,6 +2413,7 @@ class UpsampleLayer(LayerBase):
output_x = 0
output_y = 0
output_size = 0
if upsample.scale:
self.config.inputs[0].upsample_conf.scale = upsample.scale
self.config.inputs[0].upsample_conf.scale_y = upsample.scale_y
......@@ -2427,11 +2431,11 @@ class UpsampleLayer(LayerBase):
output_size = image_conf.channels * output_x * output_y
self.set_layer_height_width(output_y, output_x)
self.set_layer_depth(input_layer.depth)
self.set_layer_size(output_size)
@config_layer('pad')
class PadLayer(LayerBase):
def __init__(self, name, inputs, **xargs):
......
......@@ -2881,6 +2881,7 @@ def img_pool3d_layer(input,
num_filters=num_channels,
size=l.config.size)
@wrap_name_default("upsample")
@layer_support()
def upsample_layer(input,
......@@ -2930,6 +2931,7 @@ def upsample_layer(input,
'scale or upsample_size, there must be one to be designated'
assert len(input) == 2, 'layer input size must be 2'
assert input[1].layer_type == LayerType.POOL_LAYER, \
'the second input should be the MaxPoolWithMaskLayer'
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册