// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
#include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h"

namespace paddle {
namespace operators {

#define MAX_INPUT_NUM 3  // the max num of ET for BroadcastConfig

namespace kps = paddle::operators::kernel_primitives;

struct DimensionsTransform {
  using DimVector = std::vector<int64_t>;
  typedef void (*MergeFunctor)(bool &, std::vector<DimVector> &, DimVector &,
                               int, int);
  int64_t dim_size;
  DimVector out_dims;
  std::vector<DimVector> in_dims;

 private:
  // Extend each input tensor's dimensions to dim_size, aligning them with the
  // output dimensions according to the input variable 'axis'.
  void InputDimensionsExtend(int N, int axis) {
    for (auto &in_dim : in_dims) {
      int64_t in_idx = 0;
      if (in_dim.size() < dim_size) {
        DimVector tmp_dim(dim_size, 1);
        do {
          if (in_dim[in_idx] == out_dims[axis] || in_dim[in_idx] == 1) {
            tmp_dim[axis] = in_dim[in_idx];
            in_idx++;
            axis++;
          } else {
            PADDLE_THROW(platform::errors::InvalidArgument(
                "The %dth dimension of input tensor is expected to be equal "
                "to the %dth dimension of output tensor %d or 1, but "
                "received %d.\n",
                in_idx + 1, axis + 1, out_dims[axis], in_dim[in_idx]));
          }
        } while (in_idx < in_dim.size());
        in_dim.resize(dim_size);
        std::copy(tmp_dim.begin(), tmp_dim.end(), in_dim.begin());
      } else {
        do {
          if (in_dim[in_idx] == out_dims[in_idx] || in_dim[in_idx] == 1) {
            in_idx++;
          } else {
            PADDLE_THROW(platform::errors::InvalidArgument(
                "The %dth dimension of input tensor is expected to be equal "
                "to the %dth dimension of output tensor %d or 1, but "
                "received %d.\n",
                in_idx + 1, in_idx + 1, out_dims[in_idx], in_dim[in_idx]));
          }
        } while (in_idx < dim_size);
      }
      std::reverse(in_dim.begin(), in_dim.end());
    }
    std::reverse(out_dims.begin(), out_dims.end());
  }

  template <typename MergeFunctor>
  __inline__ void MergeDimensions(MergeFunctor merge_func, int N) {
    // Multiply the dimensions in [l_idx, m_idx) into one dimension and erase
    // the merged entries.
    auto VectorReorganise = [](DimVector *vec, int l_idx, int m_idx) {
      (*vec)[m_idx - 1] =
          std::accumulate(vec->begin() + l_idx, vec->begin() + m_idx, 1,
                          std::multiplies<int64_t>());
      vec->erase(vec->begin() + l_idx, vec->begin() + m_idx - 1);
    };

    int64_t i = 0;
    while (i < dim_size) {
      int cnt = 0;
      int low_idx = i;
      bool equal = true;
      do {
        merge_func(equal, in_dims, out_dims, i, N);
        if (equal) {
          i++;
          cnt++;
        } else {
          break;
        }
      } while (i < dim_size);

      if (cnt > 1) {
        for (auto &in_dim : in_dims) {
          VectorReorganise(&in_dim, low_idx, i);
        }
        VectorReorganise(&out_dims, low_idx, i);
        dim_size -= --cnt;
        i -= cnt;
      } else if (cnt < 1) {
        i++;
      }
    }
  }

 public:
  explicit DimensionsTransform(
      const std::vector<const framework::Tensor *> &ins,
      const framework::DDim &dims, int axis) {
    const int N = ins.size();
    dim_size = dims.size();
    out_dims = framework::vectorize<int64_t>(dims);
    in_dims.resize(N);
    for (int j = 0; j < N; ++j) {
      in_dims[j] = framework::vectorize<int64_t>(ins[j]->dims());
    }
    InputDimensionsExtend(N, axis);

    auto merge_sequential_dims = [](bool &equal,
                                    std::vector<DimVector> &in_dims,
                                    DimVector &out, int i, int num) {
      for (int j = 1; j < num; ++j) {
        equal = (in_dims[0][i] == in_dims[j][i]);
      }
    };
    auto merge_sequential_one_dims = [](bool &equal,
                                        std::vector<DimVector> &in_dims,
                                        DimVector &out, int i, int num) {
      equal = in_dims[0][i] == 1;
      if (equal) {
        for (int j = 1; j < num; ++j) {
          equal = in_dims[j][i] == out[i];
        }
      }
    };
    // Merge consecutive dimensions of the input tensors where the
    // corresponding dimensions are all equal.
    MergeFunctor merge_ptr = merge_sequential_dims;
    MergeDimensions<MergeFunctor>(merge_ptr, N);

    int min_idx = 0;
    int min_val = std::accumulate(in_dims[0].begin(), in_dims[0].end(), 1,
                                  std::multiplies<int64_t>());
    for (int j = 1; j < N; ++j) {
      int temp = std::accumulate(in_dims[j].begin(), in_dims[j].end(), 1,
                                 std::multiplies<int64_t>());
      min_val = min_val > temp ? temp : min_val;
      min_idx = min_val == temp ? j : min_idx;
    }
    std::swap(in_dims[0], in_dims[min_idx]);

    // Merge consecutive dimensions of the input tensors where 1-valued
    // dimensions appear.
    merge_ptr = merge_sequential_one_dims;
    MergeDimensions<MergeFunctor>(merge_ptr, N);
    std::swap(in_dims[min_idx], in_dims[0]);
  }
};

template <typename T, int VecSize, int ShapeSize, bool IsBoundary = false>
__device__ __forceinline__ void LoadData(
    T *dst, const T *__restrict__ src, uint32_t block_offset,
    const kps::details::BroadcastConfig<ShapeSize> &config, int numel, int num,
    bool need_broadcast) {
  // numel: total number of elements in the output
  // num: number of elements handled by this block in this pass
  if (need_broadcast) {
    kps::ReadDataBc<T, VecSize, 1, 1, ShapeSize, IsBoundary>(
        dst, src, block_offset, config, numel, 1, 1);
  } else {
    kps::ReadData<T, VecSize, 1, 1, IsBoundary>(dst, src + block_offset, num);
  }
}

template <ElementwiseType ET, typename InT, typename OutT, int ShapeSize,
          int VecSize, typename Functor, bool IsBoundary = false>
__device__ void DealSegment(
    const framework::Array<const InT *__restrict__, ET> &in, OutT *out,
    const framework::Array<bool, MAX_INPUT_NUM> &use_broadcast, uint32_t numel,
    const framework::Array<kps::details::BroadcastConfig<ShapeSize>,
                           MAX_INPUT_NUM> &configlists,
    int num, Functor func) {
  InT args[ET][VecSize];
  OutT result[VecSize];
  int block_offset = blockIdx.x * blockDim.x * VecSize;
  // load
#pragma unroll
  for (int i = 0; i < ET; i++) {
    kps::Init<InT, VecSize>(args[i], static_cast<InT>(1.0f));
    LoadData<InT, VecSize, ShapeSize, IsBoundary>(
        args[i], in[i], block_offset, configlists[i], numel, num,
        use_broadcast[i]);
  }
  // compute
  if (ET == kUnary) {
    kps::ElementwiseUnary<InT, OutT, VecSize, 1, 1, Functor>(result, args[0],
                                                             func);
  } else if (ET == kBinary) {
    kps::ElementwiseBinary<InT, OutT, VecSize, 1, 1, Functor>(result, args[0],
                                                              args[1], func);
  } else {
    kps::ElementwiseTernary<InT, OutT, VecSize, 1, 1, Functor>(
        result, args[0], args[1], args[2], func);
  }
  // store
  kps::WriteData<OutT, VecSize, 1, 1, IsBoundary>(out + block_offset, result,
                                                  num);
}

template <ElementwiseType ET, typename InT, typename OutT, int ShapeSize,
          int VecSize, typename Functor>
__global__ void BroadcastKernel(
    framework::Array<const InT *__restrict__, ET> in, OutT *out,
    framework::Array<bool, MAX_INPUT_NUM> use_broadcast, uint32_t numel,
    framework::Array<kps::details::BroadcastConfig<ShapeSize>, MAX_INPUT_NUM>
        configlists,
    int main_tid, int tail_tid, Functor func) {
  // data offset of this block
  int block_offset = blockIdx.x * blockDim.x * VecSize;
  if (blockIdx.x < main_tid) {
    // full blocks: every thread processes VecSize elements
    int num = blockDim.x * VecSize;
    DealSegment<ET, InT, OutT, ShapeSize, VecSize, Functor, false>(
        in, out, use_broadcast, numel, configlists, num, func);
  } else {  // remainder block
    int num = tail_tid;
    DealSegment<ET, InT, OutT, ShapeSize, VecSize, Functor, true>(
        in, out, use_broadcast, numel, configlists, num, func);
  }
}

template <typename InT, typename OutT, ElementwiseType ET, int VecSize,
          int Size, typename Functor>
void LaunchKernel(const platform::CUDADeviceContext &ctx,
                  const std::vector<const framework::Tensor *> &ins,
                  framework::Tensor *out, Functor func,
                  DimensionsTransform merge_dims) {
  int numel = out->numel();
  const int threads = 256;
  int blocks = ((numel + VecSize - 1) / VecSize + threads - 1) / threads;

  int main_tid = numel / (VecSize * threads);
  int tail_tid = numel % (VecSize * threads);
  auto stream = ctx.stream();
  OutT *out_data = out->data<OutT>();

  framework::Array<kps::details::BroadcastConfig<Size>, MAX_INPUT_NUM>
      configlists;
  framework::Array<bool, MAX_INPUT_NUM> use_broadcast;
  framework::Array<const InT *__restrict__, ET> ins_data;

  for (int i = 0; i < ET; i++) {
    use_broadcast[i] = (ins[i]->numel() != numel);
    ins_data[i] = ins[i]->data<InT>();
    if (use_broadcast[i]) {
      // Get the broadcast config: if the data shape is [m, n], data_dim
      // should be set to {n, m}, e.g. if out's shape is [3, 45, 1], then
      // out_dims = {1, 45, 3}.
      configlists[i] = kps::details::BroadcastConfig<Size>(
          merge_dims.out_dims, merge_dims.in_dims[i], merge_dims.dim_size);
    }
  }

  BroadcastKernel<ET, InT, OutT, Size, VecSize,
                  Functor><<<blocks, threads, 0, stream>>>(
      ins_data, out_data, use_broadcast, numel, configlists, main_tid,
      tail_tid, func);
}

template <typename InT, typename OutT, ElementwiseType ET, int VecSize,
          typename Functor>
void LaunchBroadcastKernelForDifferentDimSize(
    const platform::CUDADeviceContext &ctx,
    const std::vector<const framework::Tensor *> &ins, framework::Tensor *out,
    int axis, Functor func) {
  const auto merge_dims = DimensionsTransform(ins, out->dims(), axis);

#define DIM_SIZE(size)                                                       \
  case size: {                                                               \
    LaunchKernel<InT, OutT, ET, VecSize, size, Functor>(ctx, ins, out, func, \
                                                        merge_dims);         \
  } break;

  switch (merge_dims.dim_size) {
    DIM_SIZE(1);
    DIM_SIZE(2);
    DIM_SIZE(3);
    DIM_SIZE(4);
    DIM_SIZE(5);
    DIM_SIZE(6);
    DIM_SIZE(7);
    DIM_SIZE(8);
  }
#undef DIM_SIZE
}

template <ElementwiseType ET, typename InT, typename OutT, typename Functor>
void LaunchBroadcastElementwiseCudaKernel(
    const platform::CUDADeviceContext &ctx,
    const std::vector<const framework::Tensor *> &ins,
    std::vector<framework::Tensor *> *outs, int axis, Functor func) {
  PADDLE_ENFORCE_EQ(ET, ElementwiseType::kBinary,
                    platform::errors::InvalidArgument(
                        "Currently, only binary calculation is supported, "
                        "but received %d input tensors.\n",
                        static_cast<int>(ET)));
  int in_vec_size = 4;
  framework::Tensor *out = (*outs)[0];
  for (auto *in : ins) {
    auto temp_size = platform::GetVectorizedSize<InT>(in->data<InT>());
    in_vec_size = in->dims() == out->dims() ? std::min(temp_size, in_vec_size)
                                            : in_vec_size;
  }
  int out_vec_size = platform::GetVectorizedSize<OutT>(out->data<OutT>());
  int vec_size = std::min(out_vec_size, in_vec_size);

  switch (vec_size) {
    case 4: {
      LaunchBroadcastKernelForDifferentDimSize<InT, OutT, ET, 4>(ctx, ins, out,
                                                                 axis, func);
      break;
    }
    case 2: {
      LaunchBroadcastKernelForDifferentDimSize<InT, OutT, ET, 2>(ctx, ins, out,
                                                                 axis, func);
      break;
    }
    case 1: {
      LaunchBroadcastKernelForDifferentDimSize<InT, OutT, ET, 1>(ctx, ins, out,
                                                                 axis, func);
      break;
    }
    default: {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported vectorized size: %d !", vec_size));
      break;
    }
  }
}

template <ElementwiseType ET, typename InT, typename OutT, typename Functor>
void LaunchElementwiseCudaKernel(
    const platform::CUDADeviceContext &cuda_ctx,
    const std::vector<const framework::Tensor *> &ins,
    std::vector<framework::Tensor *> *outs, int axis, Functor func) {
  std::vector<int> dims_size;
  bool no_broadcast_flag = true;
  for (auto *in : ins) {
    no_broadcast_flag = ins[0]->dims() == in->dims();
    dims_size.emplace_back(in->dims().size());
  }

  if (no_broadcast_flag) {
    LaunchSameDimsElementwiseCudaKernel<ET, InT, OutT>(cuda_ctx, ins, outs,
                                                       func);
  } else {
    axis = axis == -1
               ? *std::max_element(dims_size.begin(), dims_size.end()) -
                     *std::min_element(dims_size.begin(), dims_size.end())
               : axis;
    LaunchBroadcastElementwiseCudaKernel<ET, InT, OutT>(cuda_ctx, ins, outs,
                                                        axis, func);
  }
}

#undef MAX_INPUT_NUM

}  // namespace operators
}  // namespace paddle
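
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, kept as a comment so the header itself is
// unchanged): how an op kernel might dispatch an element-wise add through
// LaunchElementwiseCudaKernel above. AddFunctor and the variable names are
// hypothetical placeholders, and the functor signature assumes
// kps::ElementwiseBinary invokes it with two scalar arguments; only
// LaunchElementwiseCudaKernel and ElementwiseType::kBinary come from this
// file.
//
//   template <typename T>
//   struct AddFunctor {
//     inline HOSTDEVICE T operator()(const T &a, const T &b) const {
//       return a + b;
//     }
//   };
//
//   // Inside the Compute() of a CUDA op kernel, with x, y as inputs and z
//   // as the pre-allocated output, all on the same device:
//   std::vector<const framework::Tensor *> ins = {&x, &y};
//   std::vector<framework::Tensor *> outs = {&z};
//   LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
//       dev_ctx, ins, &outs, axis, AddFunctor<T>());
// ---------------------------------------------------------------------------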