/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/infermeta/unary.h"

#include <set>
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/kernels/funcs/unfold_functor.h"

namespace phi {

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}

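// Flattens the dimensions in [start_axis, stop_axis] of x into a single
// dimension. For example, x_dims = [2, 3, 4, 5] with start_axis = 1 and
// stop_axis = 2 yields out_dims = [2, 12, 5]; a -1 anywhere in the
// flattened range makes the merged dimension -1 as well.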
void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out) {
  auto x_dims = x.dims();
  int in_dims_size = x_dims.size();
  if (start_axis < 0) {
    start_axis = start_axis + in_dims_size;
  }
  if (stop_axis < 0) {
    stop_axis = stop_axis + in_dims_size;
  }
  PADDLE_ENFORCE_GE(
      stop_axis,
      start_axis,
      phi::errors::InvalidArgument("The stop_axis should be greater "
                                   "than or equal to start_axis."));

  int64_t outer = 1;
  std::vector<int32_t> out_shape;
  out_shape.reserve(in_dims_size - stop_axis + start_axis);

  for (int i = 0; i < start_axis; ++i) {
    out_shape.push_back(x_dims[i]);
  }
  for (int i = start_axis; i <= stop_axis; i++) {
    if (x_dims[i] == -1 || outer == -1) {
      outer = -1;
    } else {
      outer *= x_dims[i];
    }
  }
  out_shape.push_back(outer);
  for (int i = stop_axis + 1; i < in_dims_size; i++) {
    out_shape.push_back(x_dims[i]);
  }
  const auto& out_dims = phi::make_ddim(out_shape);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());

  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}

void CopyToInferMeta(const MetaTensor& x,
                     Backend backend,
                     bool blocking,
                     MetaTensor* out) {
  UnchangedInferMeta(x, out);
}

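// If dtype is UNDEFINED, the output inherits x's dtype (e.g. when a
// *_like API is called without an explicit dtype).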
void CreateLikeInferMeta(const MetaTensor& x, DataType dtype, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(dtype == DataType::UNDEFINED ? x.dtype() : dtype);
  out->set_layout(x.layout());
}

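// Resolves the target 'shape' of a reshape against in_dims: a -1 entry is
// inferred from the remaining capacity and a 0 entry copies the input
// dimension at the same index. For example, in_dims = [2, 3, 4] and
// shape = [0, -1] resolve to [2, 12].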
static phi::DDim ValidateShape(const std::vector<int64_t> shape,
                               const phi::DDim& in_dims) {
  const int64_t in_size = phi::product(in_dims);
  auto in_dims_vec = phi::vectorize(in_dims);
  bool all_positive = std::all_of(in_dims_vec.cbegin(),
                                  in_dims_vec.cend(),
                                  [](int64_t i) { return i > 0; });
  // Only one dimension can be set to -1; its size will be automatically
  // inferred.
  const int64_t unk_dim_val = -1;
  const int64_t copy_dim_val = 0;

  std::vector<int64_t> output_shape(shape.size(), 0);
  int64_t capacity = 1;
  int unk_dim_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == unk_dim_val) {
      PADDLE_ENFORCE_EQ(
          unk_dim_idx,
          -1,
          phi::errors::InvalidArgument(
              "Only one dimension value of 'shape' in ReshapeOp can "
              "be -1. But received shape = [%s], shape[%d] is also -1.",
              phi::make_ddim(shape),
              i));
      unk_dim_idx = i;
    } else if (shape[i] == copy_dim_val) {
      PADDLE_ENFORCE_LT(
          static_cast<int>(i),
          in_dims.size(),
          phi::errors::InvalidArgument(
              "The index of 0 in `shape` must be less than "
              "the input tensor X's dimensions. "
              "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
              "X's dimensions = %d.",
              phi::make_ddim(shape),
              i,
              in_dims,
              in_dims.size()));
    } else {
      PADDLE_ENFORCE_GT(
          shape[i],
          0,
          phi::errors::InvalidArgument(
              "Each dimension value of 'shape' in ReshapeOp must not "
              "be negative except one unknown dimension. "
              "But received shape = [%s], shape[%d] = %d.",
              phi::make_ddim(shape),
              i,
              shape[i]));
    }

    // NOTE: any non-zero value (including a negative one) is treated as
    // true here, so only a literal 0 falls back to the input dimension.
    capacity *= (shape[i] ? shape[i] : in_dims[i]);
    output_shape[i] = (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
  }

  if (unk_dim_idx != -1) {
    if (all_positive) {
      // When in_size < 0 the size is not determined at compile time
      // (e.g. in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8] gives
      // capacity = -24, in_size = -8, output_shape[0] = 0) and the
      // following check would fail, so it only runs when all input
      // dimensions are positive.
      output_shape[unk_dim_idx] = -in_size / capacity;
      PADDLE_ENFORCE_EQ(
          output_shape[unk_dim_idx] * capacity,
          -in_size,
          phi::errors::InvalidArgument(
              "The 'shape' attribute in ReshapeOp is invalid. "
              "The input tensor X's size must be divisible by the known "
              "capacity of 'shape'. "
              "But received X's shape = [%s], X's size = %d, "
              "'shape' is [%s], known capacity of 'shape' is %d.",
              in_dims,
              in_size,
              phi::make_ddim(shape),
              capacity));
    } else {
      output_shape[unk_dim_idx] = -1;
    }
  } else {
    if (all_positive) {
      PADDLE_ENFORCE_EQ(
          capacity,
          in_size,
          phi::errors::InvalidArgument(
              "The 'shape' in ReshapeOp is invalid. "
              "The input tensor X's size must be equal to the capacity of "
              "'shape'. "
              "But received X's shape = [%s], X's size = %d, 'shape' is "
              "[%s], the capacity of 'shape' is %d.",
              in_dims,
              in_size,
              phi::make_ddim(shape),
              capacity));
    }
  }

  // Support reshape with a zero-size input (product(in_dims) == 0). For
  // now we require that a zero-size input maps to a zero-size target
  // shape for the output.
  if (in_size == 0) {
    PADDLE_ENFORCE_LE(
        capacity,
        in_size,
        phi::errors::InvalidArgument(
            "The 'shape' in ReshapeOp is invalid. "
            "The input tensor X's shape = [%s], X's capacity = %d. "
            "But the target shape of Out is [%s], the "
            "capacity of 'Out' is %d.",
            in_dims,
            in_size,
            phi::make_ddim(shape),
            capacity));
  }

  return phi::make_ddim(output_shape);
}

void InferMetaFromVecValue(const MetaTensor& x,
                           const std::vector<int64_t>& shape,
                           MetaTensor* out) {
  PADDLE_ENFORCE_EQ(!shape.empty(),
                    true,
                    phi::errors::InvalidArgument(
                        "The parameter 'shape' in ReshapeOp must be set. "
                        "But received 'shape' is empty."));
  auto x_dims = x.dims();
  auto out_dims = ValidateShape(shape, x_dims);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());
  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
                      MetaConfig config) {
  auto& shape_data = shape.GetData();
  PADDLE_ENFORCE_NOT_NULL(out,
                          phi::errors::InvalidArgument(
                              "Output(Out) of ReshapeOp should not be null."));
  if (!config.is_runtime && shape.FromTensor()) {
    out->set_dims(phi::make_ddim(shape_data));
    out->share_lod(x);
    return;
  }
  PADDLE_ENFORCE_GT(shape_data.size(),
                    0,
                    phi::errors::InvalidArgument(
                        "The shape's size in ReshapeOp can't be zero."));
  InferMetaFromVecValue(x, shape_data, out);
}

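// XShape stores x's dims prefixed with a leading 0 (a batch placeholder);
// presumably this lets the reshape gradient recover the input's shape
// without keeping the input tensor alive.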
void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                const ScalarArray& shape,
                                MetaTensor* xshape,
                                MetaTensor* out,
                                MetaConfig config) {
  PADDLE_ENFORCE_NOT_NULL(
      xshape,
      phi::errors::InvalidArgument(
          "Output(XShape) of ReshapeOp should not be null."));
  const auto& x_dims = x.dims();
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (int i = 0; i < x_dims.size(); ++i) {
    xshape_dims[i + 1] = x_dims[i];
  }
  xshape->set_dims(phi::make_ddim(xshape_dims));
  xshape->share_lod(x);
  ReshapeInferMeta(x, shape, out, config);
}

/*  Why not use ReduceInferMeta directly?
    Because we need to make the InferMeta function's args follow the
    design of api.yaml. */
void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out) {
  ReduceInferMetaBase(x, axis, keep_dim, dtype, out);
}

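// If 'axis' covers every dimension of x, the reduction degenerates to a
// full (reduce_all) reduction. Note that bool/int32 inputs are promoted
// to an int64 output unless an explicit dtype is given.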
void ReduceInferMetaBase(const MetaTensor& x,
                         const std::vector<int64_t>& axis,
                         bool keep_dim,
                         DataType dtype,
                         MetaTensor* out) {
  bool reduce_all = true;
  std::set<int64_t> dims_set(axis.begin(), axis.end());
  for (int64_t i = 0; i < x.dims().size(); ++i) {
    if (dims_set.find(i) == dims_set.end()) {
      reduce_all = false;
      break;
    }
  }

  std::vector<int64_t> out_dim_vector;
  if (keep_dim) {
    for (int64_t i = 0; i < x.dims().size(); ++i) {
      if (reduce_all || dims_set.find(i) != dims_set.end()) {
        out_dim_vector.push_back(1);
      } else {
        out_dim_vector.push_back(x.dims().at(i));
      }
    }
  } else {
    for (int64_t i = 0; i < x.dims().size(); ++i) {
      if (reduce_all || dims_set.find(i) != dims_set.end()) {
        continue;
      } else {
        out_dim_vector.push_back(x.dims().at(i));
      }
    }

    if (out_dim_vector.size() == 0) {
      out_dim_vector.push_back(1);
    }
  }
  DDim out_dim = phi::make_ddim(out_dim_vector);

  DataType out_dtype;
  if (dtype != DataType::UNDEFINED) {
    out_dtype = dtype;
  } else {
    if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 ||
        x.dtype() == DataType::INT64) {
      out_dtype = DataType::INT64;
    } else {
      out_dtype = x.dtype();
    }
  }

  out->set_dims(out_dim);
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out) {
  ReduceInferMetaBase(x, axis, keep_dim, DataType::UNDEFINED, out);
}

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(layout);
}

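// num_or_sections holds either a single value (the number of equal splits)
// or a list of section sizes, of which at most one may be -1 and is
// inferred at runtime. For example, x_dims = [4, 6], axis = 1,
// num_or_sections = [2, -1] yields output dims [4, 2] and [4, 4] at
// runtime.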
void SplitInferMeta(const MetaTensor& x,
                    const ScalarArray& num_or_sections,
                    const Scalar& axis,
                    std::vector<MetaTensor>* out,
                    MetaConfig config) {
  int axis_value = axis.to<int>();
  int rank = x.dims().size();
  PADDLE_ENFORCE_EQ(
      axis_value >= -rank && axis_value < rank,
      true,
      phi::errors::InvalidArgument(
          "The axis is expected to be in range of [%d, %d), but got %d",
          -rank,
          rank,
          axis_value));
  if (axis_value < 0) {
    axis_value = axis_value + rank;
  }

  auto input_axis_dim = x.dims().at(axis_value);
  auto num_or_sections_data = num_or_sections.GetData();
  // step 1: get formatted sections
  std::vector<int64_t> sections;
  // num_or_sections is a number
  if (num_or_sections_data.size() == 1) {
    int num = num_or_sections_data.at(0);

    PADDLE_ENFORCE_EQ(input_axis_dim % num,
                      0,
                      phi::errors::InvalidArgument(
                          "The input's size along the split dimension "
                          "must be evenly divisible by Attr(num_or_sections). "
                          "But received Attr(num_or_sections) "
                          "= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
                          num,
                          x.dims(),
                          axis_value));

    for (int i = 0; i < num; ++i) {
      sections.push_back(input_axis_dim / num);
    }
  } else {
    // num_or_sections is a list of section sizes
    const int unknown_dim_val = -1;
    int unknown_dim_idx = -1;
    int num_of_unknown = 0;
    int sum_of_section = 0;

    for (size_t i = 0; i < num_or_sections_data.size(); ++i) {
      sections.push_back(num_or_sections_data[i]);

      if (num_or_sections_data[i] == unknown_dim_val) {
        num_of_unknown++;
        unknown_dim_idx = i;
      } else {
        sum_of_section += num_or_sections_data[i];
      }
    }

    if (config.is_runtime) {
      PADDLE_ENFORCE_LE(num_of_unknown,
                        1,
                        phi::errors::InvalidArgument(
                            "Only one dimension value of Attr(num_or_sections) "
                            "in SplitOp can be -1. "
                            "But received Attr(num_or_sections) = [%s].",
                            phi::make_ddim(num_or_sections_data)));
    }

    if (unknown_dim_idx != -1) {
      // The sum of the known sections must leave room for the unknown one;
      // e.g. input shape = [4, 5], axis = 1, sections = [2, 3, -1] gives
      // input_axis_dim = 5 and sum_of_section = 5, so the following check
      // fails.
      PADDLE_ENFORCE_LT(
          sum_of_section,
          input_axis_dim,
          phi::errors::InvalidArgument(
              "Sum of Attr(num_or_sections) other than unknown section "
              "must be less than the input's "
              "size "
              "along the split dimension. But received Attr(num_or_sections) "
              "= [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
              phi::make_ddim(num_or_sections_data),
              x.dims(),
              axis_value));

      if (config.is_runtime) {
        sections[unknown_dim_idx] = input_axis_dim - sum_of_section;
      }
    } else {
      PADDLE_ENFORCE_EQ(
          sum_of_section,
          input_axis_dim,
          phi::errors::InvalidArgument(
              "Sum of Attr(num_or_sections) must be equal to the input's "
              "size "
              "along the split dimension. But received Attr(num_or_sections)"
              " = [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
              phi::make_ddim(num_or_sections_data),
              x.dims(),
              axis_value));
    }
  }

  // step 2: fill out dims
  std::vector<phi::DDim> out_dims(sections.size(), x.dims());
  if (config.is_runtime || input_axis_dim > 0) {
    for (size_t i = 0; i < sections.size(); ++i) {
      out_dims[i][axis_value] = sections[i];
    }
  } else {
    for (size_t i = 0; i < sections.size(); ++i) {
      out_dims[i][axis_value] = -1;
    }
  }

  for (size_t i = 0; i < sections.size(); ++i) {
    (*out)[i].set_dtype(x.dtype());
    (*out)[i].set_dims(out_dims[i]);
    (*out)[i].set_layout(x.layout());
    if (axis_value != 0) {
      // Only pass LoD when not splitting along the first dim, since LoD
      // describes the first dimension.
      (*out)[i].share_lod(x);
    }
  }
}

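// Unbind removes 'axis' entirely; every output shares the remaining dims.
// For example, x_dims = [2, 3, 4] with axis = 1 gives outputs of [2, 4].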
void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs) {
  auto in_dims = x.dims();
  std::vector<int> out_dim;
  axis = axis < 0 ? in_dims.size() + axis : axis;
  for (int i = 0; i < in_dims.size(); ++i) {
    if (i != axis) out_dim.push_back(in_dims[i]);
  }
  auto out_dims = phi::make_ddim(out_dim);

  for (size_t i = 0; i < outs->size(); ++i) {
    (*outs)[i].set_dtype(x.dtype());
    (*outs)[i].set_dims(out_dims);
    (*outs)[i].set_layout(x.layout());
    (*outs)[i].share_lod(x);
  }
}

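// Trace sums along the diagonal of the plane spanned by axis1 and axis2,
// so both axes are dropped from the output; a 2-D input reduces to
// dims = [1].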
void TraceInferMeta(
    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out) {
  int dim1 = axis1;
  int dim2 = axis2;

  auto x_dims = x.dims();

  int dim1_ = dim1 < 0 ? x_dims.size() + dim1 : dim1;
  int dim2_ = dim2 < 0 ? x_dims.size() + dim2 : dim2;

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::OutOfRange(
          "Input's dim is out of range (expected at least 2, but got %ld).",
          x_dims.size()));
  PADDLE_ENFORCE_LT(
      dim1_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(dim1) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          dim1));
  PADDLE_ENFORCE_LT(
      dim2_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(dim2) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          dim2));
  PADDLE_ENFORCE_NE(
      dim1_,
      dim2_,
      phi::errors::InvalidArgument("The dimensions should not be identical "
                                   "%ld vs %ld.",
                                   dim1,
                                   dim2));

  auto sizes = vectorize(x_dims);
  if (x_dims.size() == 2) {
    sizes.clear();
    sizes.push_back(1);
  } else {
    sizes.erase(sizes.begin() + std::max(dim1_, dim2_));
    sizes.erase(sizes.begin() + std::min(dim1_, dim2_));
  }
  out->set_dims(phi::make_ddim(sizes));
}

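// Unfold (im2col) flattens each sliding block into a column: the output is
// [N, C * kernel_h * kernel_w, H_out * W_out]. For example, x_dims =
// [2, 3, 8, 8], kernel_sizes = [2, 2], strides = [1, 1], paddings =
// [0, 0, 0, 0], dilations = [1, 1] gives out_dims = [2, 12, 49].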
void UnfoldInferMeta(const MetaTensor& x,
                     const std::vector<int>& kernel_sizes,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations,
                     MetaTensor* out,
                     MetaConfig config) {
  auto in_dims = x.dims();
  // Only [N, C, H, W] input supported now
  PADDLE_ENFORCE_EQ(
      in_dims.size(),
      4,
      phi::errors::InvalidArgument(
          "Input should be 4-D tensor of format [N, C, H, W], but get %u",
          in_dims.size()));
  PADDLE_ENFORCE_EQ(
      in_dims.size() - kernel_sizes.size(),
      2U,
      phi::errors::InvalidArgument(
          "The dims of X should be larger than that of kernel_sizes "
          "by a number of 2, due to the batch size and input channel dim. "
          "But recieved dims(X:%u) - dims(kernel_sizes:%u) != 2",
          in_dims.size(),
          kernel_sizes.size()));
  PADDLE_ENFORCE_EQ(
      strides.size(),
      kernel_sizes.size(),
      phi::errors::InvalidArgument(
          "The dims of strides should be the same as that of kernel_sizes. "
          "But received dims(strides: %u) != dims(kernel_sizes: %u).",
          strides.size(),
          kernel_sizes.size()));
  PADDLE_ENFORCE_EQ(
      paddings.size(),
      2 * strides.size(),
      phi::errors::InvalidArgument(
          "The dims of paddings should be 2 times that of strides. "
          "But received dims(paddings: %u) != 2*dims(strides: %u).",
          paddings.size(),
          strides.size()));
  PADDLE_ENFORCE_EQ(
      strides.size(),
      dilations.size(),
      phi::errors::InvalidArgument(
          "The dims of strides should be the same as that of dilations. "
          "But received dims(strides: %u) != dims(dilations: %u).",
          strides.size(),
          dilations.size()));

  // check kernel_sizes
  PADDLE_ENFORCE_GT(kernel_sizes[0],
                    0,
                    phi::errors::InvalidArgument(
                        "The `kernel_sizes` should be greater than zero, "
                        "but recieved kernel_height: %d kernel_width: %d.",
                        kernel_sizes[0],
                        kernel_sizes[1]));
  PADDLE_ENFORCE_GT(kernel_sizes[1],
                    0,
                    phi::errors::InvalidArgument(
                        "The `kernel_sizes` should be greater than zero, "
                        "but recieved kernel_height: %d kernel_width: %d.",
                        kernel_sizes[0],
                        kernel_sizes[1]));
  // check strides
  PADDLE_ENFORCE_GT(strides[0],
                    0,
                    phi::errors::InvalidArgument(
                        "The `strides` should be greater than zero, "
                        "but recieved strides_height: %d strides_width: %d.",
                        strides[0],
                        strides[1]));
  PADDLE_ENFORCE_GT(strides[1],
                    0,
                    phi::errors::InvalidArgument(
                        "The `strides` should be greater than zero, "
                        "but recieved strides_height: %d strides_width: %d.",
                        strides[0],
                        strides[1]));
  // check dilations
  PADDLE_ENFORCE_GT(
      dilations[0],
      0,
      phi::errors::InvalidArgument(
          "The `dilations` should be greater than zero, "
          "but recieved dilations_height: %d dilations_width: %d.",
          dilations[0],
          dilations[1]));
  PADDLE_ENFORCE_GT(
      dilations[1],
      0,
      phi::errors::InvalidArgument(
          "The `dilations` should be greater than zero, "
          "but recieved dilations_height: %d dilations_width: %d.",
          dilations[0],
          dilations[1]));

  std::vector<int> out_dims;
  out_dims.push_back(in_dims[0]);
  int output_channels = in_dims[1] * kernel_sizes[0] * kernel_sizes[1];
  out_dims.push_back(output_channels);

  int output_height = phi::funcs::CalcOutputSize(in_dims[2],
                                                 kernel_sizes[0],
                                                 dilations[0],
                                                 paddings[0],
                                                 paddings[2],
                                                 strides[0]);
  int output_width = phi::funcs::CalcOutputSize(in_dims[3],
                                                kernel_sizes[1],
                                                dilations[1],
                                                paddings[1],
                                                paddings[3],
                                                strides[1]);
  if (config.is_runtime) {
    // Only check output height and width at runtime.
    PADDLE_ENFORCE_GT(
        output_height,
        0,
        phi::errors::InvalidArgument(
            "The sliding blocks calculated from input spatial size "
            "(%d, %d), kernel_sizes (%d, %d), strides (%d, %d), "
            "dilations (%d, %d), is (%d, %d), which should be a "
            "positive integer.",
            in_dims[2],
            in_dims[3],
            kernel_sizes[0],
            kernel_sizes[1],
            strides[0],
            strides[1],
            dilations[0],
            dilations[1],
            output_height,
            output_width));
    PADDLE_ENFORCE_GT(
        output_width,
        0,
        phi::errors::InvalidArgument(
            "The sliding blocks calculated from input spatial size "
            "(%d, %d), kernel_sizes (%d, %d), strides (%d, %d), "
            "dilations (%d, %d), is (%d, %d), which should be a "
            "positive integer.",
            in_dims[2],
            in_dims[3],
            kernel_sizes[0],
            kernel_sizes[1],
            strides[0],
            strides[1],
            dilations[0],
            dilations[1],
            output_height,
            output_width));
  }
  int output_col_length = output_height * output_width;
  out_dims.push_back(output_col_length);
  out->set_dims(phi::make_ddim(out_dims));
}

}  // namespace phi

PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
PD_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);