未验证 提交 6c181d1d 编写于 作者: 张春乔 提交者: GitHub

【Hackathon No.68】Remove utils in phi (#50833)

* remove utils

* remove utils

* remove utils

* remove utils

* Update get_data_from_tensor.h

* Update rnn_functor.h

* Update rnn_grad_kernel.cu.cc

* Update rnn_kernel.cu.cc

* Update rnn_kernel.cc

* Update rnn_grad_kernel.cu.cc

* Update rnn_functor.h

* Update rnn_kernel.cu.cc

* Update rnn_kernel.cc

* remove utils

* Update rnn_functor.h

* remove utils

* remove utils

* remove utils

* remove utils

* remove utils

* Update rnn_functor.h

* Update unsqueeze_op.h

* Update utils.h

* roll back

* Update tensor_utils.h

* Update tensor_utils.h

* Update tensor_utils.h

* Update tensor_utils.h

* Update tensor_utils.h

* use TensorToVector

* use TensorToVector

* use TensorToVector

* use TensorToVector

* use TensorToVector

* Update rnn_kernel.cc

* Update rnn_grad_kernel.cc

* Update rnn_functor.h

* Update rnn_grad_kernel.cu.cc

* Update rnn_kernel.cu.cc

* Update rnn_functor.h

* Update rnn_grad_kernel.cu.cc

* Update rnn_kernel.cu.cc

* Update rnn_functor.h

* Update rnn_grad_kernel.cu.cc

* Update rnn_kernel.cu.cc

* add TensorToVector

* roll back

* Update tensor_utils.h

* Update rnn_functor.h

* Update rnn_grad_kernel.cu.cc

* Update tensor_utils.h

* Update rnn_kernel.cu.cc

* Update rnn_grad_kernel.cc

* Update rnn_kernel.cc

* Update rnn_grad_kernel.cu.cc

* Update rnn_kernel.cu.cc

* Update rnn_grad_kernel.cc

* Update rnn_kernel.cc

* TensorCopySync to phi::Copy

* fix codestyle

* rnn_kernel.cc: add ;

* replace all GetDataFromTensor with phi::GetVectorFromTensor

* delete include of util.h
上级 b8c06b6a
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/concat_op.h" #include "paddle/fluid/operators/concat_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -32,7 +33,7 @@ class ConcatMLUKernel : public framework::OpKernel<T> { ...@@ -32,7 +33,7 @@ class ConcatMLUKernel : public framework::OpKernel<T> {
bool need_resize_out_dims = false; bool need_resize_out_dims = false;
if (ctx.HasInput("AxisTensor")) { if (ctx.HasInput("AxisTensor")) {
auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor"); auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor");
axis = GetDataFromTensor<int>(axis_tensor)[0]; axis = phi::GetVectorFromTensor<int>(axis_tensor)[0];
need_resize_out_dims = true; need_resize_out_dims = true;
} }
axis = ComputeAxis(static_cast<int64_t>(axis), axis = ComputeAxis(static_cast<int64_t>(axis),
...@@ -97,7 +98,7 @@ class ConcatGradMLUKernel : public framework::OpKernel<T> { ...@@ -97,7 +98,7 @@ class ConcatGradMLUKernel : public framework::OpKernel<T> {
if (ctx.HasInput("AxisTensor")) { if (ctx.HasInput("AxisTensor")) {
auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor"); auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor");
axis = GetDataFromTensor<int>(axis_tensor)[0]; axis = phi::GetVectorFromTensor<int>(axis_tensor)[0];
} }
axis = ComputeAxis(static_cast<int64_t>(axis), axis = ComputeAxis(static_cast<int64_t>(axis),
......
...@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and ...@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/core/generator.h" #include "paddle/phi/core/generator.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/operators/cudnn_lstm_cache.h" #include "paddle/fluid/operators/cudnn_lstm_cache.h"
...@@ -242,7 +242,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel<T> { ...@@ -242,7 +242,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel<T> {
std::vector<int> SequenceLength; std::vector<int> SequenceLength;
if (has_seq_length) { if (has_seq_length) {
auto *sequence_length = ctx.Input<phi::DenseTensor>("SequenceLength"); auto *sequence_length = ctx.Input<phi::DenseTensor>("SequenceLength");
SequenceLength = operators::GetDataFromTensor<int>(sequence_length); SequenceLength = phi::GetVectorFromTensor<int>(sequence_length);
} }
auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
...@@ -532,7 +532,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel<T> { ...@@ -532,7 +532,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel<T> {
std::vector<int> SequenceLength; std::vector<int> SequenceLength;
if (has_seq_length) { if (has_seq_length) {
auto *sequence_length = ctx.Input<phi::DenseTensor>("SequenceLength"); auto *sequence_length = ctx.Input<phi::DenseTensor>("SequenceLength");
SequenceLength = operators::GetDataFromTensor<int>(sequence_length); SequenceLength = phi::GetVectorFromTensor<int>(sequence_length);
} }
int seq_length = input_dims[0]; int seq_length = input_dims[0];
......
...@@ -15,7 +15,7 @@ limitations under the License. */ ...@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/interpolate_op.h" #include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -96,7 +96,7 @@ class InterpolateV2MLUKernel : public framework::OpKernel<T> { ...@@ -96,7 +96,7 @@ class InterpolateV2MLUKernel : public framework::OpKernel<T> {
auto scale = ctx.Attr<std::vector<float>>("scale"); auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) { if (scale_tensor != nullptr) {
std::vector<float> scale_data; std::vector<float> scale_data;
scale_data = GetDataFromTensor<float>(scale_tensor); scale_data = phi::GetVectorFromTensor<float>(scale_tensor);
if (scale_data.size() > 1 && scale_data.size() <= 2) { if (scale_data.size() > 1 && scale_data.size() <= 2) {
scale_h = scale_data[0]; scale_h = scale_data[0];
...@@ -147,7 +147,7 @@ class InterpolateV2MLUKernel : public framework::OpKernel<T> { ...@@ -147,7 +147,7 @@ class InterpolateV2MLUKernel : public framework::OpKernel<T> {
auto out_size = ctx.Input<phi::DenseTensor>("OutSize"); auto out_size = ctx.Input<phi::DenseTensor>("OutSize");
if (out_size != nullptr) { if (out_size != nullptr) {
std::vector<int32_t> out_size_data; std::vector<int32_t> out_size_data;
out_size_data = GetDataFromTensor<int>(out_size); out_size_data = phi::GetVectorFromTensor<int>(out_size);
if (out_size_data.size() <= 2) { if (out_size_data.size() <= 2) {
out_h = out_size_data[0]; out_h = out_size_data[0];
out_w = out_size_data[1]; out_w = out_size_data[1];
...@@ -398,7 +398,7 @@ class InterpolateV2GradMLUKernel : public framework::OpKernel<T> { ...@@ -398,7 +398,7 @@ class InterpolateV2GradMLUKernel : public framework::OpKernel<T> {
auto scale = ctx.Attr<std::vector<float>>("scale"); auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) { if (scale_tensor != nullptr) {
std::vector<float> scale_data; std::vector<float> scale_data;
scale_data = GetDataFromTensor<float>(scale_tensor); scale_data = phi::GetVectorFromTensor<float>(scale_tensor);
if (scale_data.size() > 1) { if (scale_data.size() > 1) {
scale_h = scale_data[0]; scale_h = scale_data[0];
scale_w = scale_data[1]; scale_w = scale_data[1];
...@@ -430,7 +430,7 @@ class InterpolateV2GradMLUKernel : public framework::OpKernel<T> { ...@@ -430,7 +430,7 @@ class InterpolateV2GradMLUKernel : public framework::OpKernel<T> {
auto out_size = ctx.Input<phi::DenseTensor>("OutSize"); auto out_size = ctx.Input<phi::DenseTensor>("OutSize");
if (out_size != nullptr) { if (out_size != nullptr) {
std::vector<int32_t> out_size_data; std::vector<int32_t> out_size_data;
out_size_data = GetDataFromTensor<int>(out_size); out_size_data = phi::GetVectorFromTensor<int>(out_size);
out_h = out_size_data[0]; out_h = out_size_data[0];
out_w = out_size_data[1]; out_w = out_size_data[1];
} }
......
...@@ -15,7 +15,7 @@ limitations under the License. */ ...@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -31,8 +31,8 @@ class OneHotV2MLUKernel : public framework::OpKernel<T> { ...@@ -31,8 +31,8 @@ class OneHotV2MLUKernel : public framework::OpKernel<T> {
int depth = ctx.Attr<int>("depth"); int depth = ctx.Attr<int>("depth");
if (ctx.HasInput("depth_tensor")) { if (ctx.HasInput("depth_tensor")) {
std::vector<int32_t> depth_data; std::vector<int32_t> depth_data;
depth_data = depth_data = phi::GetVectorFromTensor<int>(
GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("depth_tensor")); ctx.Input<phi::DenseTensor>("depth_tensor"));
depth = depth_data[0]; depth = depth_data[0];
auto out_dims = out->dims(); auto out_dims = out->dims();
......
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -38,14 +38,15 @@ class Reshape2MLUKernel : public framework::OpKernel<T> { ...@@ -38,14 +38,15 @@ class Reshape2MLUKernel : public framework::OpKernel<T> {
"shape is [%d]", "shape is [%d]",
shape_tensor->dims().size())); shape_tensor->dims().size()));
target_shape_vector.push_back(GetDataFromTensor<int>(shape_tensor)[0]); target_shape_vector.push_back(
phi::GetVectorFromTensor<int>(shape_tensor)[0]);
} }
} else { } else {
auto* shape_tensor = ctx.HasInput("Shape") auto* shape_tensor = ctx.HasInput("Shape")
? ctx.Input<phi::DenseTensor>("Shape") ? ctx.Input<phi::DenseTensor>("Shape")
: nullptr; : nullptr;
if (shape_tensor) { if (shape_tensor) {
target_shape_vector = GetDataFromTensor<int>(shape_tensor); target_shape_vector = phi::GetVectorFromTensor<int>(shape_tensor);
} else { } else {
target_shape_vector = ctx.Attr<std::vector<int>>("shape"); target_shape_vector = ctx.Attr<std::vector<int>>("shape");
PADDLE_ENFORCE_GT( PADDLE_ENFORCE_GT(
......
...@@ -16,8 +16,8 @@ limitations under the License. */ ...@@ -16,8 +16,8 @@ limitations under the License. */
#include <string> #include <string>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -46,14 +46,15 @@ class Reshape2NPUKernel : public framework::OpKernel<T> { ...@@ -46,14 +46,15 @@ class Reshape2NPUKernel : public framework::OpKernel<T> {
"shape is [%d]", "shape is [%d]",
shape_tensor->dims().size())); shape_tensor->dims().size()));
target_shape_vector.push_back(GetDataFromTensor<int>(shape_tensor)[0]); target_shape_vector.push_back(
phi::GetVectorFromTensor<int>(shape_tensor)[0]);
} }
} else { } else {
auto* shape_tensor = ctx.HasInput("Shape") auto* shape_tensor = ctx.HasInput("Shape")
? ctx.Input<phi::DenseTensor>("Shape") ? ctx.Input<phi::DenseTensor>("Shape")
: nullptr; : nullptr;
if (shape_tensor) { if (shape_tensor) {
target_shape_vector = GetDataFromTensor<int>(shape_tensor); target_shape_vector = phi::GetVectorFromTensor<int>(shape_tensor);
} else { } else {
target_shape_vector = ctx.Attr<std::vector<int>>("shape"); target_shape_vector = ctx.Attr<std::vector<int>>("shape");
PADDLE_ENFORCE_GT( PADDLE_ENFORCE_GT(
......
...@@ -11,10 +11,10 @@ limitations under the License. */ ...@@ -11,10 +11,10 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/device_wrapper.h" #include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/device/xpu/xpu_header.h" #include "paddle/fluid/platform/device/xpu/xpu_header.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle { namespace paddle {
...@@ -97,7 +97,7 @@ class RNNMLUKernel : public framework::OpKernel<T> { ...@@ -97,7 +97,7 @@ class RNNMLUKernel : public framework::OpKernel<T> {
std::vector<int> seq_len_vec(batch_size, seq_len); std::vector<int> seq_len_vec(batch_size, seq_len);
if (has_seq_length) { // set seq_len if no padding, otherwise seq_len for if (has_seq_length) { // set seq_len if no padding, otherwise seq_len for
// each element. // each element.
seq_len_vec = operators::GetDataFromTensor(sequence_length); seq_len_vec = phi::GetVectorFromTensor(sequence_length);
} }
cnnlDirectionMode_t direction = cnnlDirectionMode_t direction =
is_bidirec ? CNNL_RNN_BIDIRECTIONAL : CNNL_RNN_UNIDIRECTIONAL; is_bidirec ? CNNL_RNN_BIDIRECTIONAL : CNNL_RNN_UNIDIRECTIONAL;
...@@ -480,7 +480,7 @@ class RNNMLUGradKernel : public framework::OpKernel<T> { ...@@ -480,7 +480,7 @@ class RNNMLUGradKernel : public framework::OpKernel<T> {
std::vector<int> seq_len_vec(batch_size, seq_len); std::vector<int> seq_len_vec(batch_size, seq_len);
if (has_seq_length) { if (has_seq_length) {
seq_len_vec = operators::GetDataFromTensor(sequence_length); seq_len_vec = phi::GetVectorFromTensor(sequence_length);
} }
cnnlDirectionMode_t direction = cnnlDirectionMode_t direction =
is_bidirec ? CNNL_RNN_BIDIRECTIONAL : CNNL_RNN_UNIDIRECTIONAL; is_bidirec ? CNNL_RNN_BIDIRECTIONAL : CNNL_RNN_UNIDIRECTIONAL;
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/slice_utils.h" #include "paddle/phi/kernels/funcs/slice_utils.h"
namespace paddle { namespace paddle {
...@@ -38,15 +39,16 @@ class SliceMLUKernel : public framework::OpKernel<T> { ...@@ -38,15 +39,16 @@ class SliceMLUKernel : public framework::OpKernel<T> {
auto starts_tensor_list = auto starts_tensor_list =
ctx.MultiInput<phi::DenseTensor>("StartsTensorList"); ctx.MultiInput<phi::DenseTensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) { if (ctx.HasInput("StartsTensor")) {
starts = starts = phi::GetVectorFromTensor<int>(
GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("StartsTensor")); ctx.Input<phi::DenseTensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) { } else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int>(starts_tensor_list); starts = GetDataFromTensorList<int>(starts_tensor_list);
} }
auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList"); auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) { if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("EndsTensor")); ends = phi::GetVectorFromTensor<int>(
ctx.Input<phi::DenseTensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) { } else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int>(ends_tensor_list); ends = GetDataFromTensorList<int>(ends_tensor_list);
} }
...@@ -141,15 +143,16 @@ class SliceGradMLUKernel : public framework::OpKernel<T> { ...@@ -141,15 +143,16 @@ class SliceGradMLUKernel : public framework::OpKernel<T> {
auto starts_tensor_list = auto starts_tensor_list =
ctx.MultiInput<phi::DenseTensor>("StartsTensorList"); ctx.MultiInput<phi::DenseTensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) { if (ctx.HasInput("StartsTensor")) {
starts = starts = phi::GetVectorFromTensor<int>(
GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("StartsTensor")); ctx.Input<phi::DenseTensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) { } else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int>(starts_tensor_list); starts = GetDataFromTensorList<int>(starts_tensor_list);
} }
auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList"); auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) { if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("EndsTensor")); ends = phi::GetVectorFromTensor<int>(
ctx.Input<phi::DenseTensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) { } else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int>(ends_tensor_list); ends = GetDataFromTensorList<int>(ends_tensor_list);
} }
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/slice_utils.h" #include "paddle/phi/kernels/funcs/slice_utils.h"
namespace paddle { namespace paddle {
...@@ -77,15 +78,16 @@ class SliceNPUKernel : public framework::OpKernel<T> { ...@@ -77,15 +78,16 @@ class SliceNPUKernel : public framework::OpKernel<T> {
auto starts_tensor_list = auto starts_tensor_list =
ctx.MultiInput<phi::DenseTensor>("StartsTensorList"); ctx.MultiInput<phi::DenseTensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) { if (ctx.HasInput("StartsTensor")) {
starts = starts = phi::GetVectorFromTensor<int>(
GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("StartsTensor")); ctx.Input<phi::DenseTensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) { } else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int>(starts_tensor_list); starts = GetDataFromTensorList<int>(starts_tensor_list);
} }
auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList"); auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) { if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("EndsTensor")); ends = phi::GetVectorFromTensor<int>(
ctx.Input<phi::DenseTensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) { } else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int>(ends_tensor_list); ends = GetDataFromTensorList<int>(ends_tensor_list);
} }
...@@ -172,15 +174,16 @@ class SliceGradNPUKernel : public framework::OpKernel<T> { ...@@ -172,15 +174,16 @@ class SliceGradNPUKernel : public framework::OpKernel<T> {
auto starts_tensor_list = auto starts_tensor_list =
ctx.MultiInput<phi::DenseTensor>("StartsTensorList"); ctx.MultiInput<phi::DenseTensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) { if (ctx.HasInput("StartsTensor")) {
starts = starts = phi::GetVectorFromTensor<int>(
GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("StartsTensor")); ctx.Input<phi::DenseTensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) { } else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int>(starts_tensor_list); starts = GetDataFromTensorList<int>(starts_tensor_list);
} }
auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList"); auto ends_tensor_list = ctx.MultiInput<phi::DenseTensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) { if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int>(ctx.Input<phi::DenseTensor>("EndsTensor")); ends = phi::GetVectorFromTensor<int>(
ctx.Input<phi::DenseTensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) { } else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int>(ends_tensor_list); ends = GetDataFromTensorList<int>(ends_tensor_list);
} }
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/split_op.h" #include "paddle/fluid/operators/split_op.h"
#include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -35,7 +36,7 @@ class SplitMLUKernel : public framework::OpKernel<T> { ...@@ -35,7 +36,7 @@ class SplitMLUKernel : public framework::OpKernel<T> {
bool need_resize_outs_dims = false; bool need_resize_outs_dims = false;
if (ctx.HasInput("AxisTensor")) { if (ctx.HasInput("AxisTensor")) {
auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor"); auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor");
axis = GetDataFromTensor(axis_tensor)[0]; axis = phi::GetVectorFromTensor(axis_tensor)[0];
need_resize_outs_dims = true; need_resize_outs_dims = true;
} }
auto sections_tensor_list = auto sections_tensor_list =
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/strided_slice.h" #include "paddle/phi/kernels/funcs/strided_slice.h"
namespace paddle { namespace paddle {
...@@ -168,21 +169,21 @@ class StridedSliceMLUKernel : public framework::OpKernel<T> { ...@@ -168,21 +169,21 @@ class StridedSliceMLUKernel : public framework::OpKernel<T> {
starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor); starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor);
} else if (ctx.HasInput("StartsTensor")) { } else if (ctx.HasInput("StartsTensor")) {
auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor"); auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor");
starts = GetDataFromTensor<int64_t>(starts_tensor); starts = phi::GetVectorFromTensor<int64_t>(starts_tensor);
} }
if (list_new_ends_tensor.size() > 0) { if (list_new_ends_tensor.size() > 0) {
ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor); ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor);
} else if (ctx.HasInput("EndsTensor")) { } else if (ctx.HasInput("EndsTensor")) {
auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor"); auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor");
ends = GetDataFromTensor<int64_t>(ends_tensor); ends = phi::GetVectorFromTensor<int64_t>(ends_tensor);
} }
if (list_new_strides_tensor.size() > 0) { if (list_new_strides_tensor.size() > 0) {
strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor); strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor);
} else if (ctx.HasInput("StridesTensor")) { } else if (ctx.HasInput("StridesTensor")) {
auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor"); auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor");
strides = GetDataFromTensor<int64_t>(strides_tensor); strides = phi::GetVectorFromTensor<int64_t>(strides_tensor);
} }
// out dims calculation // out dims calculation
...@@ -336,21 +337,21 @@ class StridedSliceGradMLUKernel : public framework::OpKernel<T> { ...@@ -336,21 +337,21 @@ class StridedSliceGradMLUKernel : public framework::OpKernel<T> {
starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor); starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor);
} else if (ctx.HasInput("StartsTensor")) { } else if (ctx.HasInput("StartsTensor")) {
auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor"); auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor");
starts = GetDataFromTensor<int64_t>(starts_tensor); starts = phi::GetVectorFromTensor<int64_t>(starts_tensor);
} }
if (list_new_ends_tensor.size() > 0) { if (list_new_ends_tensor.size() > 0) {
ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor); ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor);
} else if (ctx.HasInput("EndsTensor")) { } else if (ctx.HasInput("EndsTensor")) {
auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor"); auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor");
ends = GetDataFromTensor<int64_t>(ends_tensor); ends = phi::GetVectorFromTensor<int64_t>(ends_tensor);
} }
if (list_new_strides_tensor.size() > 0) { if (list_new_strides_tensor.size() > 0) {
strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor); strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor);
} else if (ctx.HasInput("StridesTensor")) { } else if (ctx.HasInput("StridesTensor")) {
auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor"); auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor");
strides = GetDataFromTensor<int64_t>(strides_tensor); strides = phi::GetVectorFromTensor<int64_t>(strides_tensor);
} }
std::vector<int64_t> out_dims_vector(input_dims.size(), -1); std::vector<int64_t> out_dims_vector(input_dims.size(), -1);
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/strided_slice.h" #include "paddle/phi/kernels/funcs/strided_slice.h"
namespace paddle { namespace paddle {
...@@ -99,21 +100,21 @@ class StridedSliceNPUKernel : public framework::OpKernel<T> { ...@@ -99,21 +100,21 @@ class StridedSliceNPUKernel : public framework::OpKernel<T> {
starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor); starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor);
} else if (ctx.HasInput("StartsTensor")) { } else if (ctx.HasInput("StartsTensor")) {
auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor"); auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor");
starts = GetDataFromTensor<int64_t>(starts_tensor); starts = phi::GetVectorFromTensor<int64_t>(starts_tensor);
} }
if (list_new_ends_tensor.size() > 0) { if (list_new_ends_tensor.size() > 0) {
ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor); ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor);
} else if (ctx.HasInput("EndsTensor")) { } else if (ctx.HasInput("EndsTensor")) {
auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor"); auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor");
ends = GetDataFromTensor<int64_t>(ends_tensor); ends = phi::GetVectorFromTensor<int64_t>(ends_tensor);
} }
if (list_new_strides_tensor.size() > 0) { if (list_new_strides_tensor.size() > 0) {
strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor); strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor);
} else if (ctx.HasInput("StridesTensor")) { } else if (ctx.HasInput("StridesTensor")) {
auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor"); auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor");
strides = GetDataFromTensor<int64_t>(strides_tensor); strides = phi::GetVectorFromTensor<int64_t>(strides_tensor);
} }
// out dims calculation // out dims calculation
...@@ -325,21 +326,21 @@ class StridedSliceGradNPUKernel : public framework::OpKernel<T> { ...@@ -325,21 +326,21 @@ class StridedSliceGradNPUKernel : public framework::OpKernel<T> {
starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor); starts = GetDataFromTensorList<int64_t>(list_new_starts_tensor);
} else if (ctx.HasInput("StartsTensor")) { } else if (ctx.HasInput("StartsTensor")) {
auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor"); auto* starts_tensor = ctx.Input<phi::DenseTensor>("StartsTensor");
starts = GetDataFromTensor<int64_t>(starts_tensor); starts = phi::GetVectorFromTensor<int64_t>(starts_tensor);
} }
if (list_new_ends_tensor.size() > 0) { if (list_new_ends_tensor.size() > 0) {
ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor); ends = GetDataFromTensorList<int64_t>(list_new_ends_tensor);
} else if (ctx.HasInput("EndsTensor")) { } else if (ctx.HasInput("EndsTensor")) {
auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor"); auto* ends_tensor = ctx.Input<phi::DenseTensor>("EndsTensor");
ends = GetDataFromTensor<int64_t>(ends_tensor); ends = phi::GetVectorFromTensor<int64_t>(ends_tensor);
} }
if (list_new_strides_tensor.size() > 0) { if (list_new_strides_tensor.size() > 0) {
strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor); strides = GetDataFromTensorList<int64_t>(list_new_strides_tensor);
} else if (ctx.HasInput("StridesTensor")) { } else if (ctx.HasInput("StridesTensor")) {
auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor"); auto* strides_tensor = ctx.Input<phi::DenseTensor>("StridesTensor");
strides = GetDataFromTensor<int64_t>(strides_tensor); strides = phi::GetVectorFromTensor<int64_t>(strides_tensor);
} }
std::vector<int64_t> out_dims_vector(input_dims.size(), -1); std::vector<int64_t> out_dims_vector(input_dims.size(), -1);
......
...@@ -19,6 +19,7 @@ limitations under the License. */ ...@@ -19,6 +19,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
...@@ -42,7 +43,7 @@ class UnsqueezeKernel : public framework::OpKernel<T> { ...@@ -42,7 +43,7 @@ class UnsqueezeKernel : public framework::OpKernel<T> {
axes = GetDataFromTensorList<int>(axes_tensor_list); axes = GetDataFromTensorList<int>(axes_tensor_list);
} else if (context.HasInput("AxesTensor")) { } else if (context.HasInput("AxesTensor")) {
auto *axes_tensor = context.Input<phi::DenseTensor>("AxesTensor"); auto *axes_tensor = context.Input<phi::DenseTensor>("AxesTensor");
axes = GetDataFromTensor<int>(axes_tensor); axes = phi::GetVectorFromTensor<int>(axes_tensor);
} }
need_resize_out_dims = true; need_resize_out_dims = true;
} }
......
...@@ -18,41 +18,11 @@ limitations under the License. */ ...@@ -18,41 +18,11 @@ limitations under the License. */
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/phi/core/tensor_utils.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
// Materializes the contents of an int32/int64 DenseTensor as a
// std::vector<T>. If the tensor does not live on the CPU it is first
// copied there synchronously so the raw data pointer is host-readable.
// Throws InvalidArgument for any other dtype.
template <typename T = int32_t>
inline std::vector<T> GetDataFromTensor(const phi::DenseTensor* x) {
  std::vector<T> vec_new_data;
  if (framework::TransToProtoVarType(x->dtype()) ==
      framework::proto::VarType::INT32) {
    auto* data = x->data<int>();
    phi::DenseTensor cpu_attr_tensor;
    if (!platform::is_cpu_place(x->place())) {
      // Blocking device->host copy; `data` is re-seated onto the CPU buffer
      // so the vector construction below never reads device memory.
      paddle::framework::TensorCopySync(
          *x, platform::CPUPlace(), &cpu_attr_tensor);
      data = cpu_attr_tensor.data<int>();
    }
    vec_new_data = std::vector<T>(data, data + x->numel());
  } else if (framework::TransToProtoVarType(x->dtype()) ==
             framework::proto::VarType::INT64) {
    auto* data = x->data<int64_t>();
    phi::DenseTensor cpu_attr_tensor;
    if (!platform::is_cpu_place(x->place())) {
      // Same blocking staging as the int32 branch, for 64-bit elements.
      paddle::framework::TensorCopySync(
          *x, platform::CPUPlace(), &cpu_attr_tensor);
      data = cpu_attr_tensor.data<int64_t>();
    }
    // NOTE: Converting int64 to int32 may cause data overflow.
    vec_new_data = std::vector<T>(data, data + x->numel());
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "The dtype of Tensor must be int32 or int64, but received: %s",
        framework::TransToProtoVarType(x->dtype())));
  }
  return vec_new_data;
}
template <typename T = int32_t> template <typename T = int32_t>
inline std::vector<T> GetDataFromTensorList( inline std::vector<T> GetDataFromTensorList(
const std::vector<const phi::DenseTensor*>& list_tensor) { const std::vector<const phi::DenseTensor*>& list_tensor) {
...@@ -100,7 +70,7 @@ inline framework::DDim GetShape(const framework::ExecutionContext& ctx) { ...@@ -100,7 +70,7 @@ inline framework::DDim GetShape(const framework::ExecutionContext& ctx) {
// 1. shape is a Tensor // 1. shape is a Tensor
if (ctx.HasInput("ShapeTensor")) { if (ctx.HasInput("ShapeTensor")) {
auto* shape_tensor = ctx.Input<phi::DenseTensor>("ShapeTensor"); auto* shape_tensor = ctx.Input<phi::DenseTensor>("ShapeTensor");
auto vec_shape = GetDataFromTensor<int>(shape_tensor); auto vec_shape = phi::GetVectorFromTensor<int>(shape_tensor);
return phi::make_ddim(vec_shape); return phi::make_ddim(vec_shape);
} }
......
...@@ -14,12 +14,16 @@ limitations under the License. */ ...@@ -14,12 +14,16 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h" #include "paddle/phi/core/device_context.h"
#include "paddle/phi/core/selected_rows.h" #include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/core/sparse_coo_tensor.h" #include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h" #include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/phi/core/tensor_meta.h" #include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/utils/data_type.h"
namespace phi { namespace phi {
...@@ -144,4 +148,36 @@ inline T GetValue(const Context& dev_ctx, const DenseTensor& x) { ...@@ -144,4 +148,36 @@ inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
return value; return value;
} }
// Materializes the contents of an int32/int64 DenseTensor as a
// std::vector<T>. If the tensor does not live on the CPU it is first
// copied there with a blocking phi::Copy so the raw data pointer read
// below is dereferenceable from the host.
//
// Throws InvalidArgument for any dtype other than int32/int64; the check
// runs before any copy so unsupported dtypes fail fast.
template <typename T = int32_t>
inline std::vector<T> GetVectorFromTensor(const phi::DenseTensor* x) {
  const auto proto_type = phi::TransToProtoVarType(x->dtype());
  if (proto_type != ProtoDataType::INT32 &&
      proto_type != ProtoDataType::INT64) {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The dtype of Tensor must be int32 or int64, but received: %s",
        proto_type));
  }
  // Stage the data on the host exactly once; both dtype branches share it.
  phi::DenseTensor cpu_attr_tensor;
  const phi::DenseTensor* host_tensor = x;
  if (!paddle::platform::is_cpu_place(x->place())) {
    phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
    auto* dev_ctx = pool.Get(x->place());
    // blocking == true: the copy must complete before we read the buffer.
    phi::Copy(*dev_ctx, *x, CPUPlace(), true, &cpu_attr_tensor);
    host_tensor = &cpu_attr_tensor;
  }
  if (proto_type == ProtoDataType::INT32) {
    const int* data = host_tensor->data<int>();
    return std::vector<T>(data, data + x->numel());
  }
  // NOTE: Converting int64 to int32 may cause data overflow.
  const int64_t* data = host_tensor->data<int64_t>();
  return std::vector<T>(data, data + x->numel());
}
} // namespace phi } // namespace phi
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#pragma once #pragma once
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/generator.h" #include "paddle/phi/core/generator.h"
#include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/tensor_utils.h"
...@@ -47,8 +46,7 @@ void CreateMaskMatrix(const CPUContext& dev_ctx, ...@@ -47,8 +46,7 @@ void CreateMaskMatrix(const CPUContext& dev_ctx,
DenseTensor* mask_matrix, DenseTensor* mask_matrix,
const bool& is_reverse, const bool& is_reverse,
int* min_seq_len) { int* min_seq_len) {
const auto& seq_len_vec = const auto& seq_len_vec = phi::GetVectorFromTensor<int>(sequence_length);
paddle::operators::GetDataFromTensor<int>(sequence_length);
const int table_width = mask_matrix->dims()[0]; const int table_width = mask_matrix->dims()[0];
DenseTensor temp = DenseTensor temp =
Empty<T>(dev_ctx, {mask_matrix->dims()[1], mask_matrix->dims()[0]}); Empty<T>(dev_ctx, {mask_matrix->dims()[1], mask_matrix->dims()[0]});
......
...@@ -14,9 +14,9 @@ ...@@ -14,9 +14,9 @@
#include "paddle/phi/kernels/rnn_grad_kernel.h" #include "paddle/phi/kernels/rnn_grad_kernel.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/gpu/rnn_functor.h" #include "paddle/phi/kernels/gpu/rnn_functor.h"
...@@ -223,8 +223,7 @@ void RnnGradKernel(const Context &dev_ctx, ...@@ -223,8 +223,7 @@ void RnnGradKernel(const Context &dev_ctx,
#endif #endif
std::vector<int> SequenceLength; std::vector<int> SequenceLength;
if (has_seq_length) { if (has_seq_length) {
SequenceLength = SequenceLength = phi::GetVectorFromTensor<int>(sequence_length.get_ptr());
paddle::operators::GetDataFromTensor<int>(sequence_length.get_ptr());
} }
auto input_dims = x.dims(); auto input_dims = x.dims();
......
...@@ -14,10 +14,10 @@ ...@@ -14,10 +14,10 @@
#include "paddle/phi/kernels/rnn_kernel.h" #include "paddle/phi/kernels/rnn_kernel.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/generator.h" #include "paddle/phi/core/generator.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/gpu/rnn_functor.h" #include "paddle/phi/kernels/gpu/rnn_functor.h"
...@@ -205,8 +205,7 @@ void RnnKernel(const Context &dev_ctx, ...@@ -205,8 +205,7 @@ void RnnKernel(const Context &dev_ctx,
#endif #endif
std::vector<int> SequenceLength; std::vector<int> SequenceLength;
if (has_seq_length) { if (has_seq_length) {
SequenceLength = SequenceLength = phi::GetVectorFromTensor<int>(sequence_length.get_ptr());
paddle::operators::GetDataFromTensor<int>(sequence_length.get_ptr());
} }
auto handle = dev_ctx.cudnn_handle(); auto handle = dev_ctx.cudnn_handle();
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/rnn_grad_kernel.h" #include "paddle/phi/kernels/rnn_grad_kernel.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h" #include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/xpu/rnn_util.h" #include "paddle/phi/kernels/xpu/rnn_util.h"
...@@ -165,8 +165,7 @@ void RnnGradKernel(const Context& dev_ctx, ...@@ -165,8 +165,7 @@ void RnnGradKernel(const Context& dev_ctx,
bool has_seq_length = sequence_length.is_initialized(); bool has_seq_length = sequence_length.is_initialized();
std::vector<int> seq_len_tensor(batch_size, seq_len); std::vector<int> seq_len_tensor(batch_size, seq_len);
if (has_seq_length) { if (has_seq_length) {
seq_len_tensor = seq_len_tensor = phi::GetVectorFromTensor<int>(sequence_length.get_ptr());
paddle::operators::GetDataFromTensor<int>(sequence_length.get_ptr());
} }
for (int i = num_layers - 1; i >= 0; --i) { for (int i = num_layers - 1; i >= 0; --i) {
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/rnn_kernel.h" #include "paddle/phi/kernels/rnn_kernel.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h" #include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/xpu/rnn_util.h" #include "paddle/phi/kernels/xpu/rnn_util.h"
...@@ -119,8 +119,7 @@ void RnnKernel(const Context& dev_ctx, ...@@ -119,8 +119,7 @@ void RnnKernel(const Context& dev_ctx,
bool has_seq_length = sequence_length.is_initialized(); bool has_seq_length = sequence_length.is_initialized();
if (has_seq_length) { if (has_seq_length) {
seq_len_tensor = seq_len_tensor = phi::GetVectorFromTensor<int>(sequence_length.get_ptr());
paddle::operators::GetDataFromTensor<int>(sequence_length.get_ptr());
} }
int state_offset = pre_state[0]->dims()[1] * pre_state[0]->dims()[2]; int state_offset = pre_state[0]->dims()[1] * pre_state[0]->dims()[2];
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册