/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_op.h"

#include <string>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "Input(Input) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("Filter"),
                 "Input(Filter) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Output"),
                 "Output(Output) of ConvOp should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");

  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  int groups = ctx->Attrs().Get<int>("groups");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");

  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                 "Conv input should be a 4-D or 5-D tensor.");
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      "Conv input dimension and filter dimension should be the same.");
  PADDLE_ENFORCE(
      in_dims.size() - strides.size() == 2U,
      "Conv input dimension and strides dimension should be consistent.");
  PADDLE_ENFORCE_EQ(
      paddings.size(), strides.size(),
      "Conv paddings dimension and Conv strides dimension should be the same.");

  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
                    "The number of input channels should be equal to filter "
                    "channels * groups.");
  PADDLE_ENFORCE_EQ(
      filter_dims[0] % groups, 0,
      "The number of output channels should be divisible by groups.");

  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
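  // Each spatial output dimension below is computed by ConvOutputSize; a
  // sketch of the rule it is assumed to implement (see the DOC string below):
  //   out_size = (in_size + 2 * padding - (dilation * (kernel - 1) + 1)) / stride + 1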
  for (size_t i = 0; i < strides.size(); ++i) {
    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                          dilations[i], paddings[i],
                                          strides[i]));
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Output");
}

framework::OpKernelType ConvOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout = framework::StringToDataLayout(data_format);

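  // Kernel library selection: prefer cuDNN when it can be used, otherwise
  // fall back to MKL-DNN, otherwise keep the plain (reference) kernel.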
#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type = ctx.Input<Tensor>("Input")->type();
  auto filter_data_type = ctx.Input<Tensor>("Filter")->type();
  PADDLE_ENFORCE_EQ(input_data_type, filter_data_type,
                    "input and filter data type should be consistent");

  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

void Conv2DOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "H is the height of the filter, and W is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application. "
           "The bias is a one-dimensional tensor of size equal "
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCHW.");
  AddInput("ResidualData",
           "(Tensor) Tensor with residual data "
           "to which convolution output will be added. "
           "Used with fuse_residual_connection fusion.")
      .AsDispensable();
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1}), the "
                            "strides(h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0}), the "
                            "paddings(h_pad, w_pad) of "
                            "convolution operator.")
      .SetDefault({0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need cudnn installed")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_residual_connection",
                "(bool, default false) Only used in mkldnn kernel. Set when "
                "the convolution output is used as an input to a residual "
                "connection.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to registered layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Need to set use_cudnn to true. "
               "Workspace size for cudnn, in MB; "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs. A larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddAttr<bool>("exhaustive_search",
                "(bool, default false) cuDNN has many algorithms to compute "
                "convolution; whether to enable exhaustive search "
                "for cuDNN convolution or not, default is False.")
      .SetDefault(false);
  AddComment(R"DOC(
Convolution Operator.

The convolution operation calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. The size of each dimension of the
parameters is checked in the infer-shape.
Input(Input) and Output(Output) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature.
Filters(Input) is in MCHW format, where M is the number of output image channels, C is
the number of input image channels, H is the height of the filter, and W
is the width of the filter.
Parameters(strides, paddings, dilations) are two-element vectors. These two elements represent
height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
$$
       H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1
$$
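  A worked example with illustrative (not default) values:
  Input shape $(1, 3, 32, 32)$, Filter shape $(8, 3, 3, 3)$,
  strides = {1, 1}, paddings = {1, 1}, dilations = {1, 1}, which gives
$$
       H_{out} = \frac{(32 + 2 * 1 - (1 * (3 - 1) + 1))}{1} + 1 = 32 \\
       W_{out} = \frac{(32 + 2 * 1 - (1 * (3 - 1) + 1))}{1} + 1 = 32
$$
  so the Output shape is $(1, 8, 32, 32)$.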
)DOC");
  Apply();
}

void Conv3DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCDHW, where N is batch size, C is the "
      "number of channels, D is the depth of the feature, H is the height of "
      "the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "D is the depth of the filter, H is the height of the filter, and W "
           "is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCDHW.");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int>, default:{0, 0, 0}), the "
                            "paddings(d_pad, h_pad, w_pad) of convolution "
                            "operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1, 1}), the "
                            "dilations(d_dilation, h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need cudnn installed")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to registered layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Workspace size for cudnn, in MB; "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs. A larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddAttr<bool>("exhaustive_search",
                "(bool, default false) cuDNN has many algorithms to compute "
                "convolution; whether to enable exhaustive search "
                "for cuDNN convolution or not, default is False.")
      .SetDefault(false);
  AddComment(R"DOC(
Convolution3D Operator.

The convolution operation calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. The size of each dimension of the
parameters is checked in the infer-shape.
Input(Input) and output(Output) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature.
Filters(Input) is in MCDHW format, where M is the number of output image channels,
C is the number of input image channels, D is the depth of the filter,
H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) are three-element vectors. These three elements
represent depth, height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{ strides[0]}+ 1 \\
       H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{ strides[1]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1
  $$
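  A worked example with illustrative (not default) values:
  Input shape $(1, 3, 15, 15, 15)$, Filter shape $(8, 3, 3, 3, 3)$,
  strides = {2, 2, 2}, paddings = {1, 1, 1}, dilations = {1, 1, 1}, which gives
  $$
       D_{out} = \frac{(15 + 2 * 1 - (1 * (3 - 1) + 1))}{2} + 1 = 8 \\
       H_{out} = \frac{(15 + 2 * 1 - (1 * (3 - 1) + 1))}{2} + 1 = 8 \\
       W_{out} = \frac{(15 + 2 * 1 - (1 * (3 - 1) + 1))}{2} + 1 = 8
  $$
  so the Output shape is $(1, 8, 8, 8, 8)$.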
)DOC");
  Apply();
}

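// The gradients of Input and Filter take the shapes of the corresponding
// forward tensors, so InferShape simply mirrors those dimensions.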
void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(ctx.Input<Tensor>("Input")->type(),
                                 ctx.GetPlace(), layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  ops::ConvOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);

// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);

REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
                  ops::ConvOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);

// depthwise conv kernel
// TODO(xingzhaolong): neon kernel for mobile
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv2d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv3d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);