Commit de26ae41 authored by chengduoZH

add gpu code

Parent 4f5e3d0d
@@ -151,42 +151,26 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
 template <typename T>
 struct CosSimDyFunctor<platform::CPUDeviceContext, T> {
-  CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
-                  const T* z, const T* dz, T* dy, int cols)
-      : x_norm_(x_norm),
-        y_norm_(y_norm),
-        x_(x),
-        y_(y),
-        z_(z),
-        dz_(dz),
-        dy_(dy),
-        cols_(static_cast<size_t>(cols)) {}
-
-  inline HOSTDEVICE void operator()(size_t offset) const {
-    auto xy_norm_prod = x_norm_[offset] * y_norm_[0];
-    auto dz = dz_[offset];
-    auto z = z_[offset];
-    auto* x = x_ + cols_ * offset;
-    auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
-    auto y_norm_square = y_norm_[0] * y_norm_[0];
-    auto reciprocal_y_norm_square = 1 / y_norm_square;
-    for (size_t i = 0; i < cols_; ++i) {
-      dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod -
-                      z * y_[i] * reciprocal_y_norm_square);
-    }
-  }
-
-  const T* x_norm_;
-  const T* y_norm_;
-  const T* x_;
-  const T* y_;
-  const T* z_;
-  const T* dz_;
-  T* dy_;
-  const size_t cols_;
+  inline void operator()(const platform::CPUDeviceContext& ctx, const T* x_norm,
+                         const T* y_norm, const T* x, const T* y, const T* z,
+                         const T* dz, const size_t rows, const size_t cols,
+                         T* dy) const {
+    for (size_t offset = 0; offset < rows; ++offset) {
+      auto xy_norm_prod = x_norm[offset] * y_norm[0];
+      auto dz_data = dz[offset];
+      auto z_data = z[offset];
+      auto* x_data = x + cols * offset;
+      auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
+      auto y_norm_square = y_norm[0] * y_norm[0];
+      auto reciprocal_y_norm_square = 1 / y_norm_square;
+      for (size_t i = 0; i < cols; ++i) {
+        dy[i] += dz_data * (x_data[i] * reciprocal_xy_norm_prod -
+                            z_data * y[i] * reciprocal_y_norm_square);
+      }
+    }
+  }
 };
 }  // namespace operators
 }  // namespace paddle
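Note on the CPU change: the per-row dispatch that previously went through platform::ForRange now lives inside the functor itself, so the functor is stateless and the whole rows-by-cols loop runs in one call. Because y is broadcast across all rows of x, every row accumulates into the same dy[0..cols). The standalone sketch below (not part of the commit; sizes and values are made up) checks that accumulation against the cosine-similarity gradient formula the functor implements:

    // Standalone sketch: verify dy[j] = sum_i dz[i] * (x[i][j] / (|x_i||y|)
    //                                   - z[i] * y[j] / |y|^2)
    // using the same accumulation order as the new CPU functor.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t rows = 2, cols = 3;
      std::vector<float> x = {1, 2, 3, 4, 5, 6};  // rows x cols
      std::vector<float> y = {1, 0, 1};           // single broadcast row
      std::vector<float> dz = {0.5f, -1.0f};      // upstream grad, one per row

      // Forward values the grad op receives as inputs:
      // x_norm[i] = |x_i|, y_norm = |y|, z[i] = cos(x_i, y).
      std::vector<float> x_norm(rows), z(rows);
      float y_norm = 0;
      for (size_t j = 0; j < cols; ++j) y_norm += y[j] * y[j];
      y_norm = std::sqrt(y_norm);
      for (size_t i = 0; i < rows; ++i) {
        float xx = 0, xy = 0;
        for (size_t j = 0; j < cols; ++j) {
          xx += x[i * cols + j] * x[i * cols + j];
          xy += x[i * cols + j] * y[j];
        }
        x_norm[i] = std::sqrt(xx);
        z[i] = xy / (x_norm[i] * y_norm);
      }

      // Same accumulation as the functor: dy[j] sums contributions from
      // every row because y is broadcast across rows_x.
      std::vector<float> dy(cols, 0);
      for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
          dy[j] += dz[i] * (x[i * cols + j] / (x_norm[i] * y_norm) -
                            z[i] * y[j] / (y_norm * y_norm));
        }
      }
      for (size_t j = 0; j < cols; ++j) printf("dy[%zu] = %f\n", j, dy[j]);
      return 0;
    }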
@@ -20,45 +20,45 @@ namespace paddle {
 namespace operators {
 template <typename T>
-struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
-  CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
-                  const T* z, const T* dz, T* dy, int cols)
-      : x_norm_(x_norm),
-        y_norm_(y_norm),
-        x_(x),
-        y_(y),
-        z_(z),
-        dz_(dz),
-        dy_(dy),
-        cols_(static_cast<size_t>(cols)) {}
-
-  inline HOSTDEVICE void operator()(size_t offset) const {
-    auto xy_norm_prod = x_norm_[offset] * y_norm_[0];
-    auto dz = dz_[offset];
-    auto z = z_[offset];
-    auto* x = x_ + cols_ * offset;
-    auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
-    auto y_norm_square = y_norm_[0] * y_norm_[0];
-    auto reciprocal_y_norm_square = 1 / y_norm_square;
-    for (size_t i = 0; i < cols_; ++i) {
-      T dy = dz * (x[i] * reciprocal_xy_norm_prod -
-                   z * y_[i] * reciprocal_y_norm_square);
-      // platform::CudaAtomicAdd(dy_ + i, dy);
-      dy_[i] += dy;
-    }
-  }
-
-  const T* x_norm_;
-  const T* y_norm_;
-  const T* x_;
-  const T* y_;
-  const T* z_;
-  const T* dz_;
-  T* dy_;
-  const size_t cols_;
+__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x,
+                               const T* y, const T* z, const T* dz,
+                               const size_t rows, const size_t cols, T* dy) {
+  int grid_size = blockDim.x * gridDim.x;
+  T y_norm_data = y_norm[0];
+  for (int offset = blockIdx.x * blockDim.x + threadIdx.x; offset < rows;
+       offset += grid_size) {
+    T xy_norm_prod = x_norm[offset] * y_norm_data;
+    T dz_data = dz[offset];
+    T z_data = z[offset];
+    const T* x_data = x + cols * offset;
+    T reciprocal_xy_norm_prod = 1 / xy_norm_prod;
+    T y_norm_square = y_norm_data * y_norm_data;
+    T reciprocal_y_norm_square = 1 / y_norm_square;
+    for (size_t i = 0; i < cols; ++i) {
+      T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod -
+                             z_data * y[i] * reciprocal_y_norm_square);
+      platform::CudaAtomicAdd(dy + i, dy_data);
+    }
+  }
+}
+
+template <typename T>
+struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
+  inline void operator()(const platform::CUDADeviceContext& ctx,
+                         const T* x_norm, const T* y_norm, const T* x,
+                         const T* y, const T* z, const T* dz, const size_t rows,
+                         const size_t cols, T* dy) const {
+    const int block_size = 512;
+    dim3 threads(block_size, 1);
+    dim3 grid(1, (rows + block_size - 1) / block_size);
+    CosSimDyKernel<T><<<grid, threads, 0, ctx.stream()>>>(
+        x_norm, y_norm, x, y, z, dz, rows, cols, dy);
+  }
 };
+
+template struct CosSimDyFunctor<platform::CUDADeviceContext, float>;
 }  // namespace operators
 }  // namespace paddle
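Two things stand out in the CUDA side. First, the previously commented-out CudaAtomicAdd is now enabled, and necessarily so: every thread handles a full row but all threads add into the same dy[0..cols), so the accumulation must be atomic. Second, the kernel computes its stride as blockDim.x * gridDim.x and indexes rows with blockIdx.x, yet the launch places the extra blocks on the grid's y axis via dim3 grid(1, ...). With gridDim.x == 1, blockIdx.x is 0 in every block, so each of the grid.y blocks appears to walk all rows and the atomic adds repeat grid.y times. A launch that matches the kernel's indexing would look like the sketch below (illustrative, not part of the commit):

    // Hedged sketch: spread blocks along x so that blockIdx.x / gridDim.x
    // line up with the grid-stride loop inside CosSimDyKernel. One block per
    // block_size rows; the blocks then cover the row range exactly once.
    const int block_size = 512;
    dim3 threads(block_size, 1);
    dim3 grid((rows + block_size - 1) / block_size, 1);
    CosSimDyKernel<T><<<grid, threads, 0, ctx.stream()>>>(
        x_norm, y_norm, x, y, z, dz, rows, cols, dy);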
@@ -193,9 +193,10 @@ struct CosSimDxFunctor {
 template <typename DeviceContext, typename T>
 struct CosSimDyFunctor {
-  CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
-                  const T* z, const T* dz, T* dy, int cols);
-  inline HOSTDEVICE void operator()(size_t) const;
+  inline void operator()(const DeviceContext& ctx, const T* x_norm,
+                         const T* y_norm, const T* x, const T* y, const T* z,
+                         const T* dz, const size_t rows, const size_t cols,
+                         T* dy) const;
 };
 
 template <typename DeviceContext, typename T>
@@ -255,14 +256,11 @@ class CosSimGradKernel : public framework::OpKernel<T> {
       auto& dev_ctx = context.template device_context<DeviceContext>();
       set_zero(dev_ctx, out_grad_y, static_cast<T>(0));
-      CosSimDyFunctor<DeviceContext, T> functor(
-          in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
-          in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
-          out_grad_y->data<T>(), cols);
-      platform::ForRange<DeviceContext> for_range(
-          static_cast<const DeviceContext&>(context.device_context()),
-          rows_x);
-      for_range(functor);
+      CosSimDyFunctor<DeviceContext, T> functor;
+      functor(dev_ctx, in_x_norm->data<T>(), in_y_norm->data<T>(),
+              in_x->data<T>(), in_y->data<T>(), in_z->data<T>(),
+              in_grad_z->data<T>(), static_cast<size_t>(rows_x),
+              static_cast<size_t>(cols), out_grad_y->data<T>());
     }
   }
 }
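The header change is what ties the two specializations together: the functor becomes a plain stateless callable that the grad kernel default-constructs and invokes once with the device context, raw pointers, and both rows and cols, instead of stashing pointers in a constructor and being driven row by row through platform::ForRange. That single-call interface is what lets the CUDA specialization launch one kernel over all rows. Supporting another backend would then just mean another specialization; a minimal sketch, with SomeOtherDeviceContext as a purely illustrative placeholder:

    // Hedged sketch: SomeOtherDeviceContext is hypothetical, shown only to
    // illustrate how a new backend plugs into the stateless interface.
    template <typename T>
    struct CosSimDyFunctor<SomeOtherDeviceContext, T> {
      inline void operator()(const SomeOtherDeviceContext& ctx, const T* x_norm,
                             const T* y_norm, const T* x, const T* y, const T* z,
                             const T* dz, const size_t rows, const size_t cols,
                             T* dy) const {
        // Backend-specific loop over rows goes here; it must accumulate every
        // row's contribution into dy[0..cols), as the CPU and CUDA versions do.
      }
    };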