Unverified commit 81abaaf5, authored by Guoxia Wang, committed by GitHub

modify index dtype from int to int64_t of concat_and_split_functor (#43479)

Parent a89060ac
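Why the dtype matters: once the flattened row or column extent of the concatenated tensor grows past INT_MAX (2^31 - 1), a 32-bit index such as the old tid_x or local_col wraps around and addresses the wrong element. A minimal sketch of that failure mode, using a hypothetical element count rather than any value from this commit:

#include <climits>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical flattened column count of a large concatenated tensor;
  // it no longer fits in a 32-bit signed int.
  int64_t output_cols = 3000000000LL;  // > INT_MAX (2147483647)

  int narrow = static_cast<int>(output_cols);  // value is lost in the narrowing
  int64_t wide = output_cols;                  // value is preserved

  printf("as int:     %d\n", narrow);             // negative / garbage
  printf("as int64_t: %lld\n", (long long)wide);  // 3000000000
  return 0;
}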
@@ -26,22 +26,21 @@ __global__ void ConcatKernel_(const T** inputs,
                               const int64_t output_rows,
                               const int64_t output_cols,
                               T* output) {
-  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
-  int curr_segment = 0;
-  int curr_offset = input_cols[0];
-  for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) {
-    int curr_col_offset = input_cols[curr_segment + 1];
+  int64_t curr_segment = 0;
+  int64_t curr_offset = input_cols[0];
+  CUDA_KERNEL_LOOP_TYPE(tid_x, output_cols, int64_t) {
+    int64_t curr_col_offset = input_cols[curr_segment + 1];
     while (curr_col_offset <= tid_x) {
       curr_offset = curr_col_offset;
       ++curr_segment;
       curr_col_offset = input_cols[curr_segment + 1];
     }
-    int local_col = tid_x - curr_offset;
-    int segment_width = curr_col_offset - curr_offset;
+    int64_t local_col = tid_x - curr_offset;
+    int64_t segment_width = curr_col_offset - curr_offset;
     const T* input_ptr = inputs[curr_segment];
-    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
+    int64_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
     for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y)
       output[tid_y * output_cols + tid_x] =
           input_ptr[tid_y * segment_width + local_col];
@@ -50,16 +49,15 @@ __global__ void ConcatKernel_(const T** inputs,
 template <typename T>
 __device__ void ConcatKernelDetail(const T** inputs_data,
-                                   const int fixed_in_col,
-                                   const int out_rows,
-                                   const int out_cols,
+                                   const int64_t fixed_in_col,
+                                   const int64_t out_rows,
+                                   const int64_t out_cols,
                                    T* output_data) {
-  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
-  for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) {
-    int split = tid_x * 1.0 / fixed_in_col;
-    int in_offset = tid_x - split * fixed_in_col;
+  CUDA_KERNEL_LOOP_TYPE(tid_x, out_cols, int64_t) {
+    int64_t split = tid_x * 1.0 / fixed_in_col;
+    int64_t in_offset = tid_x - split * fixed_in_col;
     const T* input_ptr = inputs_data[split];
-    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
+    int64_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
     for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) {
       output_data[tid_y * out_cols + tid_x] =
           input_ptr[tid_y * fixed_in_col + in_offset];
@@ -133,22 +131,21 @@ __global__ void SplitKernel_(const T* input_data,
                              const int64_t* out_cols,
                              int out_cols_size,
                              T** outputs_data) {
-  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
-  int curr_segment = 0;
-  int curr_offset = out_cols[0];
-  for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
-    int curr_col_offset = out_cols[curr_segment + 1];
+  int64_t curr_segment = 0;
+  int64_t curr_offset = out_cols[0];
+  CUDA_KERNEL_LOOP_TYPE(tid_x, in_col, int64_t) {
+    int64_t curr_col_offset = out_cols[curr_segment + 1];
     while (curr_col_offset <= tid_x) {
       curr_offset = curr_col_offset;
       ++curr_segment;
       curr_col_offset = out_cols[curr_segment + 1];
     }
-    int local_col = tid_x - curr_offset;
-    int segment_width = curr_col_offset - curr_offset;
+    int64_t local_col = tid_x - curr_offset;
+    int64_t segment_width = curr_col_offset - curr_offset;
     T* output_ptr = outputs_data[curr_segment];
     if (output_ptr != nullptr) {
-      int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
+      int64_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
       for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
         output_ptr[tid_y * segment_width + local_col] =
             input_data[tid_y * in_col + tid_x];
@@ -158,17 +155,16 @@ __global__ void SplitKernel_(const T* input_data,
 template <typename T>
 __device__ void SplitKernelDetail(const T* input_data,
-                                  const int in_row,
-                                  const int in_col,
-                                  const int fixed_out_col,
+                                  const int64_t in_row,
+                                  const int64_t in_col,
+                                  const int64_t fixed_out_col,
                                   T** outputs_data) {
-  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
-  for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
-    int split = tid_x / fixed_out_col;
-    int in_offset = tid_x - split * fixed_out_col;
+  CUDA_KERNEL_LOOP_TYPE(tid_x, in_col, int64_t) {
+    int64_t split = tid_x / fixed_out_col;
+    int64_t in_offset = tid_x - split * fixed_out_col;
     T* output_ptr = outputs_data[split];
     if (output_ptr != nullptr) {
-      int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
+      int64_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
       for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
         output_ptr[tid_y * fixed_out_col + in_offset] =
             input_data[tid_y * in_col + tid_x];
@@ -266,7 +262,7 @@ struct ConcatFunctor<phi::GPUContext, T> {
                   int axis,
                   phi::DenseTensor* output) {
     // TODO(zcd): Add input data validity checking
-    int in_num = input.size();
+    int64_t in_num = input.size();
     int64_t in_row = 1;
     auto dim_0 = input[0].dims();
     for (int i = 0; i < axis; ++i) {
@@ -275,7 +271,7 @@ struct ConcatFunctor<phi::GPUContext, T> {
     int64_t in_col = input[0].numel() / in_row;
     int64_t out_row = in_row, out_col = 0;
-    int inputs_col_num = in_num + 1;
+    int64_t inputs_col_num = in_num + 1;
     std::vector<const T*> inputs_data_vec(in_num);
     std::vector<int64_t> inputs_col_vec(inputs_col_num);
     const T** inputs_data = inputs_data_vec.data();
...
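The new code leans on Paddle's CUDA_KERNEL_LOOP_TYPE(index, count, index_type) macro, which wraps a grid-stride loop whose induction variable has the requested type. As a rough sketch only — the exact expansion lives in Paddle's GPU headers and is not reproduced here — a 64-bit grid-stride loop over n elements looks like the following; ScaleKernel64 and its body are illustrative, not part of this commit:

// Sketch of a 64-bit grid-stride loop, approximating what
// CUDA_KERNEL_LOOP_TYPE(tid_x, n, int64_t) is assumed to provide.
template <typename T>
__global__ void ScaleKernel64(const T* in, T* out, int64_t n, T factor) {
  // Both the starting index and the stride are computed in 64-bit
  // arithmetic, so n may exceed INT_MAX without overflow.
  for (int64_t i = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
       i < n;
       i += static_cast<int64_t>(blockDim.x) * gridDim.x) {
    out[i] = in[i] * factor;
  }
}

Keeping tid_x, tid_y, and the derived offsets (local_col, segment_width, in_offset) in int64_t is what this commit changes throughout ConcatKernel_, SplitKernel_, ConcatKernelDetail, and SplitKernelDetail, so tensors whose flattened extent exceeds 2^31 - 1 elements are indexed correctly.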