未验证 提交 a36c5490 编写于 作者: R Ryan 提交者: GitHub

[PHI Decoupling] move sequence_scale from fluid to phi (#49668)

* try sequence_padding

* fix can't use mutable_data

* fix mistaken fluid sequence_scale.h / CMakeLists.txt include

* fix namespace bug

* fix framework::ToAbsOffset not found

* fix codestyle
上级 923f2458
......@@ -30,7 +30,6 @@ math_library(sampler DEPS generator)
math_library(sequence_padding)
math_library(sequence_pooling DEPS math_function jit_kernel_helper)
math_library(sequence_scale)
if(WITH_ASCEND_CL)
math_library(beam_search DEPS math_function beam_search_npu)
elseif(WITH_XPU)
......
......@@ -22,6 +22,7 @@ math_library(vol2col)
math_library(softmax DEPS math_function)
math_library(maxouting)
math_library(matrix_bit_code)
math_library(sequence_scale)
cc_library(
phi_data_layout_transform
......
......@@ -12,17 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
namespace phi {
class DenseTensor;
} // namespace phi
namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {
template <typename T>
class ScaleLoDTensorFunctor<phi::CPUContext, T> {
......@@ -34,9 +32,9 @@ class ScaleLoDTensorFunctor<phi::CPUContext, T> {
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
size_t seq_width = seq->dims()[1];
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
paddle::framework::LoD abs_offset_lod = paddle::framework::ToAbsOffset(lod);
T* seq_data = seq->mutable_data<T>(context.GetPlace());
T* seq_data = context.template Alloc<T>(seq);
for (size_t i = 0; i < num_seq; ++i) {
for (size_t j = lod[level][i] * seq_width;
j < lod[level][i + 1] * seq_width;
......@@ -50,6 +48,5 @@ class ScaleLoDTensorFunctor<phi::CPUContext, T> {
template class ScaleLoDTensorFunctor<phi::CPUContext, float>;
template class ScaleLoDTensorFunctor<phi::CPUContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
......@@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {
using phi::PADDLE_CUDA_NUM_THREADS;
......@@ -45,8 +44,8 @@ class ScaleLoDTensorFunctor<phi::GPUContext, T> {
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq->numel() / seq->dims()[0];
auto abs_offset_lod = framework::ToAbsOffset(lod);
T* seq_data = seq->mutable_data<T>(context.GetPlace());
auto abs_offset_lod = paddle::framework::ToAbsOffset(lod);
T* seq_data = context.template Alloc<T>(seq);
paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level]));
#ifdef PADDLE_WITH_HIP
......@@ -75,6 +74,5 @@ class ScaleLoDTensorFunctor<phi::GPUContext, T> {
template class ScaleLoDTensorFunctor<phi::GPUContext, float>;
template class ScaleLoDTensorFunctor<phi::GPUContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
......@@ -17,9 +17,8 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {
/*
* \brief Scale a sequence.
......@@ -45,6 +44,7 @@ namespace math {
* \param num_seq Number of sequence
*
*/
template <typename DeviceContext, typename T>
class ScaleLoDTensorFunctor {
public:
......@@ -53,6 +53,5 @@ class ScaleLoDTensorFunctor {
phi::DenseTensor* seq);
};
} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
......@@ -17,12 +17,12 @@
#include <vector>
#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/phi/backends/dynload/warpctc.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/utils/optional.h"
namespace phi {
......@@ -79,7 +79,7 @@ void WarpctcGradKernel(const Context& dev_ctx,
paddle::operators::math::kLengthBatchWidth);
const T* loss_grad_data = loss_grad.data<T>();
paddle::operators::math::ScaleLoDTensorFunctor<Context, T>()(
phi::funcs::ScaleLoDTensorFunctor<Context, T>()(
dev_ctx, loss_grad_data, logits_grad);
}
}
......
......@@ -17,12 +17,12 @@
#include <vector>
#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/phi/backends/dynload/warpctc.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/utils/optional.h"
namespace phi {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册