提交 4d42f4fa 编写于 作者: P phlrain

update

上级 263b4773
...@@ -41,9 +41,12 @@ void SetValueCompute(const framework::ExecutionContext& ctx, ...@@ -41,9 +41,12 @@ void SetValueCompute(const framework::ExecutionContext& ctx,
auto dtype = framework::TransToProtoVarType(in->dtype()); auto dtype = framework::TransToProtoVarType(in->dtype());
auto in_dims = in->dims(); auto in_dims = in->dims();
CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, starts, ends, &steps); phi::funcs::CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, starts, ends,
auto slice_dims = GetSliceDims(in_dims, axes, *starts, *ends, &steps); &steps);
auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes); auto slice_dims =
phi::funcs::GetSliceDims(in_dims, axes, *starts, *ends, &steps);
auto decrease_slice_dims =
phi::funcs::GetDecreasedDims(slice_dims, decrease_axes);
auto slice_dims_for_assign = decrease_slice_dims; auto slice_dims_for_assign = decrease_slice_dims;
if (!none_axes.empty()) { if (!none_axes.empty()) {
...@@ -281,10 +284,10 @@ void SliceCompute(const framework::ExecutionContext& ctx, ...@@ -281,10 +284,10 @@ void SliceCompute(const framework::ExecutionContext& ctx,
} }
} }
CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends); phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends);
slice_dims = slice_dims = phi::funcs::GetSliceDims<int64_t>(in_dims, axes, starts, ends,
GetSliceDims<int64_t>(in_dims, axes, starts, ends, nullptr, nullptr); nullptr, nullptr);
out_dims = GetDecreasedDims(slice_dims, decrease_axis); out_dims = phi::funcs::GetDecreasedDims(slice_dims, decrease_axis);
// 2.2 Get output // 2.2 Get output
auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>(); auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
......
...@@ -25,10 +25,10 @@ ...@@ -25,10 +25,10 @@
#include "paddle/fluid/operators/assign_value_op.h" #include "paddle/fluid/operators/assign_value_op.h"
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/slice_utils.h"
#include "paddle/fluid/operators/strided_slice_op.h" #include "paddle/fluid/operators/strided_slice_op.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/kernels/funcs/slice_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -188,9 +188,11 @@ class SetValueKernel : public framework::OpKernel<T> { ...@@ -188,9 +188,11 @@ class SetValueKernel : public framework::OpKernel<T> {
} }
auto in_dims = in->dims(); auto in_dims = in->dims();
CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps); phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps);
auto slice_dims = GetSliceDims(in_dims, axes, starts, ends, &steps); auto slice_dims =
auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes); phi::funcs::GetSliceDims(in_dims, axes, starts, ends, &steps);
auto decrease_slice_dims =
phi::funcs::GetDecreasedDims(slice_dims, decrease_axes);
auto slice_dims_for_assign = decrease_slice_dims; auto slice_dims_for_assign = decrease_slice_dims;
if (!none_axes.empty()) { if (!none_axes.empty()) {
......
...@@ -17,6 +17,7 @@ limitations under the License. */ ...@@ -17,6 +17,7 @@ limitations under the License. */
#include <memory> #include <memory>
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/phi/kernels/funcs/slice_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -101,15 +102,17 @@ class SliceOp : public framework::OperatorWithKernel { ...@@ -101,15 +102,17 @@ class SliceOp : public framework::OperatorWithKernel {
"The size of ends must be equal to the size of axes.")); "The size of ends must be equal to the size of axes."));
} }
CheckAndUpdateSliceAttrs<int>(in_dims, axes, &starts, &ends, nullptr, phi::funcs::CheckAndUpdateSliceAttrs<int>(in_dims, axes, &starts, &ends,
&infer_flags); nullptr, &infer_flags);
auto slice_dims = auto slice_dims = phi::funcs::GetSliceDims<int>(in_dims, axes, starts, ends,
GetSliceDims<int>(in_dims, axes, starts, ends, nullptr, &infer_flags); nullptr, &infer_flags);
if (ctx->IsRuntime()) { if (ctx->IsRuntime()) {
out_dims = GetDecreasedDims<int>(slice_dims, decrease_axis, &infer_flags); out_dims = phi::funcs::GetDecreasedDims<int>(slice_dims, decrease_axis,
&infer_flags);
} else { } else {
out_dims = GetDecreasedDims<int>(slice_dims, decrease_axis, nullptr); out_dims =
phi::funcs::GetDecreasedDims<int>(slice_dims, decrease_axis, nullptr);
} }
ctx->SetOutputDim("Out", out_dims); ctx->SetOutputDim("Out", out_dims);
......
...@@ -18,7 +18,6 @@ limitations under the License. */ ...@@ -18,7 +18,6 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/slice_utils.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
......
...@@ -29,5 +29,4 @@ PD_REGISTER_KERNEL(slice_grad, ...@@ -29,5 +29,4 @@ PD_REGISTER_KERNEL(slice_grad,
double, double,
phi::dtype::complex<float>, phi::dtype::complex<float>,
phi::dtype::complex<double>, phi::dtype::complex<double>,
phi::dtype::bfloat16, phi::dtype::bfloat16) {}
phi::dtype::float16) {}
...@@ -19,6 +19,8 @@ limitations under the License. */ ...@@ -19,6 +19,8 @@ limitations under the License. */
namespace phi { namespace phi {
namespace funcs {
template <typename T = int64_t> template <typename T = int64_t>
inline void CheckAndUpdateSliceAttrs(const DDim in_dims, inline void CheckAndUpdateSliceAttrs(const DDim in_dims,
const std::vector<T>& axes, const std::vector<T>& axes,
...@@ -161,4 +163,5 @@ inline DDim GetDecreasedDims(const DDim slice_dims, ...@@ -161,4 +163,5 @@ inline DDim GetDecreasedDims(const DDim slice_dims,
return decreased_dims; return decreased_dims;
} }
} // namespace funcs
} // namespace phi } // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/impl/slice_grad_kernel_impl.h"
#include "paddle/phi/kernels/slice_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
// Registers phi::SliceGradRawKernel as the GPU implementation of the
// "slice_grad" op for every dtype listed below. Note that both reduced-
// precision float types (bfloat16 AND float16) are registered here, which
// is the point of this commit: the corresponding CPU registration (seen
// elsewhere in this diff) drops float16, while this GPU list keeps it.
// The kernel body itself lives in slice_grad_kernel_impl.h; this
// translation unit only instantiates and registers it.
PD_REGISTER_KERNEL(slice_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::SliceGradRawKernel,
                   bool,
                   int,
                   int64_t,
                   float,
                   double,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>,
                   phi::dtype::bfloat16,
                   phi::dtype::float16) {}
...@@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice, ...@@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice,
double, double,
phi::dtype::complex<float>, phi::dtype::complex<float>,
phi::dtype::complex<double>, phi::dtype::complex<double>,
phi::dtype::bfloat16) {} phi::dtype::bfloat16,
phi::dtype::float16) {}
...@@ -66,136 +66,143 @@ void EigenPaddingCompute( ...@@ -66,136 +66,143 @@ void EigenPaddingCompute(
// if dimension less than 3, cannot reduce dimension // if dimension less than 3, cannot reduce dimension
LaunchEigenPadding<T, Context, D>( LaunchEigenPadding<T, Context, D>(
context, d_input, in_dims, d_out, out_dims, paddings); context, d_input, in_dims, d_out, out_dims, paddings);
} } else { // else we can reduce dimension
// } else { // else we can reduce dimension // count not-zero padding number, and record the dimension
// // count not-zero padding number, and record the dimension int need_pad_num = 0, pad_dim = -1;
// int need_pad_num = 0, pad_dim = -1; for (size_t i = 0; i < D; i++) {
// for (size_t i = 0; i < D; i++) { if (paddings[i].first != 0 || paddings[i].second != 0) {
// if (paddings[i].first != 0 || paddings[i].second != 0) { need_pad_num++;
// need_pad_num++; pad_dim = i;
// pad_dim = i; }
// } }
// }
// if (need_pad_num == 1) { if (need_pad_num == 1) {
// // only need padding one dimension, we can reduce dimension. // only need padding one dimension, we can reduce dimension.
// // only the padding dimension is available for us. // only the padding dimension is available for us.
// // How to reduce dimension(5 to 3 for example): // How to reduce dimension(5 to 3 for example):
// // before(D=5): // before(D=5):
// // in_dims: [x1, x2, x3, x4, x5] // in_dims: [x1, x2, x3, x4, x5]
// // padding.first: [0, 0, a, 0, 0] // padding.first: [0, 0, a, 0, 0]
// // padding.second: [0, 0, b, 0, 0] // padding.second: [0, 0, b, 0, 0]
// // | | // | |
// // V V // V V
// // after(D=3): // after(D=3):
// // reshaped_in_dims: [x1*x2, x3, x4*x5] // reshaped_in_dims: [x1*x2, x3, x4*x5]
// // reshaped_padding.first: [0, a, 0] // reshaped_padding.first: [0, a, 0]
// // reshaped_padding.second: [0, b, 0] // reshaped_padding.second: [0, b, 0]
// if (pad_dim == D - 1) { if (pad_dim == D - 1) {
// // only last dimension need padding, // only last dimension need padding,
// // reshape the dimension of tensor in 2: [preceding, padding] // reshape the dimension of tensor in 2: [preceding, padding]
// std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1); std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1);
// Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding; Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding;
// // first dimension is the accumulate of preceding dimension // first dimension is the accumulate of preceding dimension
// for (int i = 0; i < pad_dim; i++) { for (int i = 0; i < pad_dim; i++) {
// in_tore_shape[0] *= in_dims[i]; in_tore_shape[0] *= in_dims[i];
// out_tore_shape[0] *= out_dims[i]; out_tore_shape[0] *= out_dims[i];
// } }
// // second dimension is the padding dimension // second dimension is the padding dimension
// in_tore_shape[1] = in_dims[pad_dim]; in_tore_shape[1] = in_dims[pad_dim];
// out_tore_shape[1] = out_dims[pad_dim]; out_tore_shape[1] = out_dims[pad_dim];
// // convert array from std::vector to DDim // convert array from std::vector to DDim
// DDim reshaped_in_dims = make_ddim(in_tore_shape); DDim reshaped_in_dims = make_ddim(in_tore_shape);
// DDim reshaped_out_dims = make_ddim(out_tore_shape); DDim reshaped_out_dims = make_ddim(out_tore_shape);
// // after reshape: the first dimension do not need padding, // after reshape: the first dimension do not need padding,
// // set padding[0] zero // set padding[0] zero
// reshaped_padding[0].first = reshaped_padding[0].second = 0; reshaped_padding[0].first = reshaped_padding[0].second = 0;
// // the second dimension is the previous padding dimension // the second dimension is the previous padding dimension
// reshaped_padding[1].first = paddings[pad_dim].first; reshaped_padding[1].first = paddings[pad_dim].first;
// reshaped_padding[1].second = paddings[pad_dim].second; reshaped_padding[1].second = paddings[pad_dim].second;
// LaunchEigenPadding<T, Context, D>(context, d_input, reshaped_in_dims, LaunchEigenPadding<T, Context>(context,
// d_out, d_input,
// reshaped_out_dims, reshaped_padding); reshaped_in_dims,
// } else if (pad_dim == 0) { d_out,
// // only first dimension need padding, reshaped_out_dims,
// // reshape the dimension of tensor in 2: [padding, succeeding] reshaped_padding);
// // similar to (D - 1) } else if (pad_dim == 0) {
// std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1); // only first dimension need padding,
// Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding; // reshape the dimension of tensor in 2: [padding, succeeding]
// similar to (D - 1)
std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1);
Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding;
// // first dimension is the padding dimension // first dimension is the padding dimension
// in_tore_shape[0] = in_dims[pad_dim]; in_tore_shape[0] = in_dims[pad_dim];
// out_tore_shape[0] = out_dims[pad_dim]; out_tore_shape[0] = out_dims[pad_dim];
// // sencond dimension is the accumulate of succeeding dimension // sencond dimension is the accumulate of succeeding dimension
// for (size_t i = pad_dim + 1; i < D; i++) { for (size_t i = pad_dim + 1; i < D; i++) {
// in_tore_shape[1] *= in_dims[i]; in_tore_shape[1] *= in_dims[i];
// out_tore_shape[1] *= out_dims[i]; out_tore_shape[1] *= out_dims[i];
// } }
// // convert array from std::vector to DDim // convert array from std::vector to DDim
// DDim reshaped_in_dims = make_ddim(in_tore_shape); DDim reshaped_in_dims = make_ddim(in_tore_shape);
// DDim reshaped_out_dims = make_ddim(out_tore_shape); DDim reshaped_out_dims = make_ddim(out_tore_shape);
// // after reshape: // after reshape:
// // the first dimension is the previous padding dimension // the first dimension is the previous padding dimension
// reshaped_padding[0].first = paddings[pad_dim].first; reshaped_padding[0].first = paddings[pad_dim].first;
// reshaped_padding[0].second = paddings[pad_dim].second; reshaped_padding[0].second = paddings[pad_dim].second;
// // the second dimension do not need padding, set padding[1] zero // the second dimension do not need padding, set padding[1] zero
// reshaped_padding[1].first = reshaped_padding[1].second = 0; reshaped_padding[1].first = reshaped_padding[1].second = 0;
// LaunchEigenPadding<T, Context, D>(context, d_input, reshaped_in_dims, LaunchEigenPadding<T, Context>(context,
// d_out, d_input,
// reshaped_out_dims, reshaped_padding); reshaped_in_dims,
// } else { d_out,
// // other dimension need padding reshaped_out_dims,
// // reshape the dimension of tensor in 3: reshaped_padding);
// // [preceding, padding, succeeding] } else {
// std::vector<int64_t> in_tore_shape(3, 1), out_tore_shape(3, 1); // other dimension need padding
// Eigen::array<std::pair<int64_t, int64_t>, 3> reshaped_padding; // reshape the dimension of tensor in 3:
// [preceding, padding, succeeding]
std::vector<int64_t> in_tore_shape(3, 1), out_tore_shape(3, 1);
Eigen::array<std::pair<int64_t, int64_t>, 3> reshaped_padding;
// // first dimension is the accumulate of preceding dimension // first dimension is the accumulate of preceding dimension
// for (int i = 0; i < pad_dim; i++) { for (int i = 0; i < pad_dim; i++) {
// in_tore_shape[0] *= in_dims[i]; in_tore_shape[0] *= in_dims[i];
// out_tore_shape[0] *= out_dims[i]; out_tore_shape[0] *= out_dims[i];
// } }
// // second dimension is the padding dimension // second dimension is the padding dimension
// in_tore_shape[1] = in_dims[pad_dim]; in_tore_shape[1] = in_dims[pad_dim];
// out_tore_shape[1] = out_dims[pad_dim]; out_tore_shape[1] = out_dims[pad_dim];
// // third dimension is the accumulate of succeeding dimension // third dimension is the accumulate of succeeding dimension
// for (size_t i = pad_dim + 1; i < D; i++) { for (size_t i = pad_dim + 1; i < D; i++) {
// in_tore_shape[2] *= in_dims[i]; in_tore_shape[2] *= in_dims[i];
// out_tore_shape[2] *= out_dims[i]; out_tore_shape[2] *= out_dims[i];
// } }
// // convert array from std::vector to DDim // convert array from std::vector to DDim
// DDim reshaped_in_dims = make_ddim(in_tore_shape); DDim reshaped_in_dims = make_ddim(in_tore_shape);
// DDim reshaped_out_dims = make_ddim(out_tore_shape); DDim reshaped_out_dims = make_ddim(out_tore_shape);
// // after reshape: // after reshape:
// // the first dimension do not need padding, set padding[0] zero // the first dimension do not need padding, set padding[0] zero
// reshaped_padding[0].first = reshaped_padding[2].second = 0; reshaped_padding[0].first = reshaped_padding[2].second = 0;
// // the second dimension is the previous padding dimension // the second dimension is the previous padding dimension
// reshaped_padding[1].first = paddings[pad_dim].first; reshaped_padding[1].first = paddings[pad_dim].first;
// reshaped_padding[1].second = paddings[pad_dim].second; reshaped_padding[1].second = paddings[pad_dim].second;
// // the third dimension do not need padding, set padding[2] zero // the third dimension do not need padding, set padding[2] zero
// reshaped_padding[2].first = reshaped_padding[2].second = 0; reshaped_padding[2].first = reshaped_padding[2].second = 0;
// LaunchEigenPadding<T, Context, D>(context, d_input, reshaped_in_dims, LaunchEigenPadding<T, Context>(context,
// d_out, d_input,
// reshaped_out_dims, reshaped_padding); reshaped_in_dims,
// } d_out,
// } else { reshaped_out_dims,
// // need padding at many dimension, cannot reduce dimension reshaped_padding);
// LaunchEigenPadding<T, Context, D>(context, d_input, in_dims, d_out, }
// out_dims, } else {
// paddings); // need padding at many dimension, cannot reduce dimension
// } LaunchEigenPadding<T, Context>(
// } context, d_input, in_dims, d_out, out_dims, paddings);
}
}
} }
template <typename T, typename Context, size_t D> template <typename T, typename Context, size_t D>
......
...@@ -60,10 +60,10 @@ void SliceCompute(const Context& ctx, ...@@ -60,10 +60,10 @@ void SliceCompute(const Context& ctx,
} }
} }
CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, &starts, &ends); funcs::CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, &starts, &ends);
slice_dims = slice_dims = funcs::GetSliceDims<int64_t>(
GetSliceDims<int64_t>(in_dims, axes, starts, ends, nullptr, nullptr); in_dims, axes, starts, ends, nullptr, nullptr);
out_dims = GetDecreasedDims<int64_t>(slice_dims, decrease_axis); out_dims = funcs::GetDecreasedDims<int64_t>(slice_dims, decrease_axis);
// 2.2 Get output // 2.2 Get output
auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>(); auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册