/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"

namespace phi {
namespace funcs {

using phi::PADDLE_CUDA_NUM_THREADS;

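// One CUDA block per sequence: block b multiplies every element of sequence b
// (rows lod[b]..lod[b+1] of the flattened tensor, each row seq_width wide)
// by scales[b]. Threads stride over the sequence in steps of BlockSize.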
template <typename T, int BlockSize>
__global__ void SequenceScaleKernel(T* seq,
                                    size_t* lod,
                                    const T* scales,
                                    const size_t seq_width) {
  for (int i = threadIdx.x;
       i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width;
       i += BlockSize) {
    int idx = lod[blockIdx.x] * seq_width + i;
    seq[idx] *= scales[blockIdx.x];
  }
}

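// GPU specialization of ScaleLoDTensorFunctor: scales each top-level
// sequence of a LoD tensor in place by its entry in `scales`, launching
// one block per sequence on the context's stream.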
template <typename T>
class ScaleLoDTensorFunctor<phi::GPUContext, T> {
 public:
  void operator()(const phi::GPUContext& context,
                  const T* scales,
                  phi::DenseTensor* seq) {
    const size_t level = 0;
    auto lod = seq->lod();
    const size_t num_seq = lod[level].size() - 1;
    const size_t seq_width = seq->numel() / seq->dims()[0];
    auto abs_offset_lod = paddle::framework::ToAbsOffset(lod);
    T* seq_data = context.template Alloc<T>(seq);
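    // MixVector mirrors the absolute offsets into device memory so the
    // kernel can index sequences; CopyToCPU() below syncs them back.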
    paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level]));

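    // HIP and CUDA differ only in launch syntax; both run num_seq blocks of
    // PADDLE_CUDA_NUM_THREADS threads on the context's stream.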
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(
        HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>),
        dim3(num_seq),
        dim3(PADDLE_CUDA_NUM_THREADS),
        0,
        context.stream(),
        seq_data,
        mix_vector.CUDAMutableData(context.GetPlace()),
        scales,
        seq_width);
#else
    SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>
        <<<num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>(
            seq_data,
            mix_vector.CUDAMutableData(context.GetPlace()),
            scales,
            seq_width);
#endif
    mix_vector.CopyToCPU();
  }
};

template class ScaleLoDTensorFunctor<phi::GPUContext, float>;
template class ScaleLoDTensorFunctor<phi::GPUContext, double>;

}  // namespace funcs
}  // namespace phi