/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/depthwise_conv.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/vol2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Base convolution operator definitions for other conv
// like operators to reuse the implementation.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
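  // Worked example: input_size = 5, filter_size = 3, dilation = 2,
  // padding = 1, stride = 2 gives dkernel = 2 * (3 - 1) + 1 = 5 and
  // output_size = (5 + 2 * 1 - 5) / 2 + 1 = 2.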
  PADDLE_ENFORCE(
      output_size > 0,
      "Due to the settings of padding(%d), filter_size(%d), dilation(%d) and "
      "stride(%d), the computed output size is not positive; please check "
      "them again. Input_size:%d",
      padding, filter_size, dilation, stride, input_size);

  return output_size;
}
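
// If the filter is 1x1 (or 1x1x1) with unit stride, zero padding and unit
// dilation, the im2col/vol2col expansion is an identity copy; IsExpand
// returns true in every other case, where the column buffer is really needed.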
inline bool IsExpand(std::vector<int64_t>& filter_dim,
                     std::vector<int>& strides, std::vector<int>& paddings,
                     std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
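// Both makers (implemented in conv_op.cc) declare the Input/Filter/Output
// variables and the strides/paddings/dilations/groups attributes that the
// kernels below read via context.Attr.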
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

template <typename DeviceContext, typename T>
class GemmConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped in the calculations, so it is copied by
    // value here to avoid modifying the variable in the Scope.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
    // o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }
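    // When expansion is needed, the col buffer above is allocated once and
    // reused for every batch / group iteration below.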

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output->dims()[1],
        output->numel() / (output->dims()[0] * output->dims()[1])};

    // convolution operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output->dims()[1]) / groups;
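    // Each group independently convolves in_step input channels with out_step
    // filters: the per-group GEMM below multiplies the filter slice of shape
    // (out_step, in_step * k_h * k_w) by the column matrix of shape
    // (in_step * k_h * k_w, o_h * o_w) (with k_d / o_d factors in 3D).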

    math::Vol2ColFunctor<DeviceContext, T> vol2col;
    math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;

    auto& dev_ctx = context.template device_context<DeviceContext>();
    for (int i = 0; i < batch_size; i++) {
      Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
      Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);

      for (int g = 0; g < groups; g++) {
        Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

        if (!is_expand) {
          col.ShareDataWith(in_slice);
          col_matrix.ShareDataWith(col);
          col_matrix.Resize(col_matrix_shape);
        } else if (data_dim == 2U) {
          // im2col
          im2col(dev_ctx, in_slice, dilations, strides,
                 std::vector<int>{paddings[0], paddings[1], paddings[0],
                                  paddings[1]},
                 &col);
        } else if (data_dim == 3U) {
          // vol2col
          vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
        }

        // gemm
        Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
        math::matmul<DeviceContext, T>(dev_ctx, filter_slice, false, col_matrix,
                                       false, T(1.0), &out_slice, T(0.0));
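        // alpha = 1, beta = 0: out_slice is overwritten rather than
        // accumulated.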
      }
    }
  }
};

template <typename DeviceContext, typename T>
class GemmConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    // The filter and filter_grad will be reshaped in the calculations, so the
    // filter is copied by value here to avoid modifying the variable in the
    // Scope.
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    int groups = context.Attr<int>("groups");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    const int batch_size = static_cast<int>(input->dims()[0]);

    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
    // output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
    std::vector<int64_t> output_shape_vec(
        framework::vectorize(output_grad->dims()));

    // use col_shape in the im2col calculation
    // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
    // o_h, o_w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    col_shape_vec[0] = input->dims()[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
    }
    framework::DDim col_shape(framework::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (i_c/g * k_h * k_w, o_h * o_w)
    // or
    // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
    framework::DDim col_matrix_shape =
        framework::flatten_to_2d(col_shape, data_dim + 1);

    framework::DDim input_shape = framework::slice_ddim(
        input->dims(), 1, static_cast<int>(input->dims().size()));

    framework::DDim filter_matrix_shape = {filter.dims()[0],
                                           filter.numel() / filter.dims()[0]};
    filter.Resize(filter_matrix_shape);

    framework::DDim output_matrix_shape = {
        output_grad->dims()[1],
        output_grad->numel() /
            (output_grad->dims()[0] * output_grad->dims()[1])};

    // convolution backward input operator:  gemm + col2im(or col2vol)
    // convolution backward weight operator: im2col(or vol2col) + gemm
    int in_step = static_cast<int>(input->dims()[1]) / groups;
    int out_step = static_cast<int>(output_grad->dims()[1]) / groups;

    bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
    Tensor col;
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    if (is_expand) {
      col.mutable_data<T>(col_shape, context.GetPlace());
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);
    }

    math::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());

      // If is_expand is false, set_zero is unnecessary because math::matmul
      // overwrites input_grad directly (beta = 0).
      if (is_expand) {
        set_zero(dev_ctx, input_grad, static_cast<T>(0));
      }
      math::Col2VolFunctor<DeviceContext, T> col2vol;
      math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im;

      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // gemm
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);

          Tensor in_grad_slice =
              in_grad_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col_matrix.ShareDataWith(in_grad_slice);
            col_matrix.Resize(col_matrix_shape);
          }
          math::matmul<DeviceContext, T>(dev_ctx, filter_slice, true,
                                         out_grad_slice, false, T(1.0),
                                         &col_matrix, T(0.0));
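          // Backward data: col_matrix = filter_slice^T * out_grad_slice; when
          // expanded, col2im / col2vol below fold the columns back into
          // in_grad_slice (otherwise col_matrix aliases in_grad_slice).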

          if (is_expand && data_dim == 2U) {
            col2im(dev_ctx, col, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &in_grad_slice);
          } else if (is_expand && data_dim == 3U) {
            col2vol(dev_ctx, col, dilations, strides, paddings, &in_grad_slice);
          }
        }
      }
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      Tensor filter_grad_ = *filter_grad;
      filter_grad_.Resize(filter_matrix_shape);
      set_zero(dev_ctx, filter_grad, static_cast<T>(0));
      math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;
      math::Vol2ColFunctor<DeviceContext, T> vol2col;
      for (int i = 0; i < batch_size; i++) {
        Tensor out_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
        Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
        for (int g = 0; g < groups; g++) {
          // im2col
          Tensor out_grad_slice =
              out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
          Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);

          if (!is_expand) {
            col.ShareDataWith(in_slice);
            col_matrix.ShareDataWith(col);
            col_matrix.Resize(col_matrix_shape);
          } else if (data_dim == 2U) {
            im2col(dev_ctx, in_slice, dilations, strides,
                   std::vector<int>{paddings[0], paddings[1], paddings[0],
                                    paddings[1]},
                   &col);
          } else if (data_dim == 3U) {
            vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col);
          }

          // gemm
          Tensor filter_grad_slice =
              filter_grad_.Slice(g * out_step, (g + 1) * out_step);
          math::matmul<DeviceContext, T>(dev_ctx, out_grad_slice, false,
                                         col_matrix, true, T(1.0),
                                         &filter_grad_slice, T(1.0));
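          // beta = 1 accumulates the weight gradient over the batch loop,
          // which is why filter_grad is zeroed once before it.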
        }
      }
    }
  }
};

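// Depthwise convolution applies each filter to a single input channel (the
// output channel count must be a multiple of the input channel count), so it
// is computed directly, without the im2col + GEMM expansion used above.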
template <typename DeviceContext, typename T>
class DepthwiseConvKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    PADDLE_ENFORCE_EQ(
        output->dims()[1] % input->dims()[1], 0,
        "The output channels must be a multiple of the input channels");
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    math::DepthwiseConvFunctor<DeviceContext, T> depthwiseConv;

    auto& dev_ctx = context.template device_context<DeviceContext>();
    depthwiseConv(dev_ctx, *input, filter, strides, paddings, output);
  }
};

template <typename DeviceContext, typename T>
class DepthwiseConvGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");

    math::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();

    math::DepthwiseConvInputGradFunctor<DeviceContext, T>
        depthwiseConvInputGrad;
    math::DepthwiseConvFilterGradFunctor<DeviceContext, T>
        depthwiseConvFilterGrad;

    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());
      set_zero(dev_ctx, input_grad, static_cast<T>(0));
      depthwiseConvInputGrad(dev_ctx, *input, filter, *output_grad, strides,
                             paddings, input_grad);
    }

    if (filter_grad) {
      filter_grad->mutable_data<T>(context.GetPlace());
      set_zero(dev_ctx, filter_grad, static_cast<T>(0));
      depthwiseConvFilterGrad(dev_ctx, *input, *output_grad, strides, paddings,
                              filter_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle