Commit 7ac00dd6 authored by chengduoZH

refine

Parent 49df2a78
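The commit replaces the iterator-pair ForEachZip with an index-based loop and factors the broadcast dY gradient into a device-specialized CosSimDyFunctor (plain accumulation on the CPU, atomic accumulation on CUDA). A minimal standalone sketch of the new iteration pattern follows; the RowSum functor and main() are illustrative only, not Paddle code:

#include <cstddef>
#include <vector>

// Index-based ForEachZip: the callback receives a row index instead of
// dereferenced iterators, so the same functor can be driven per-row on
// either device backend.
template <typename Callback>
static void ForEachZip(size_t num, Callback callback) {
  for (size_t i = 0; i < num; ++i) {
    callback(i);
  }
}

// Hypothetical row-wise functor, used only to show the calling convention.
struct RowSum {
  const float* data;
  size_t cols;
  float* out;
  void operator()(size_t row) const {
    float s = 0;
    for (size_t i = 0; i < cols; ++i) s += data[row * cols + i];
    out[row] = s;
  }
};

int main() {
  std::vector<float> x = {1, 2, 3, 4, 5, 6};  // 2 rows x 3 cols
  std::vector<float> sums(2);
  ForEachZip(2, RowSum{x.data(), 3, sums.data()});
  // sums == {6, 15}
  return 0;
}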
paddle/operators/cos_sim_op.cc
@@ -149,6 +149,44 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
  }
};

template <typename T>
struct CosSimDyFunctor<platform::CPUDeviceContext, T> {
CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
const T* z, const T* dz, T* dy, int cols)
: x_norm_(x_norm),
y_norm_(y_norm),
x_(x),
y_(y),
z_(z),
dz_(dz),
dy_(dy),
cols_(static_cast<size_t>(cols)) {}
inline void operator()(size_t offset) const {
auto xy_norm_prod = x_norm_[offset] * y_norm_[0];
auto dz = dz_[offset];
auto z = z_[offset];
auto* x = x_ + cols_ * offset;
auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
auto y_norm_square = y_norm_[0] * y_norm_[0];
auto reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols_; ++i) {
dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod -
z * y_[i] * reciprocal_y_norm_square);
}
}
const T* x_norm_;
const T* y_norm_;
const T* x_;
const T* y_;
const T* z_;
const T* dz_;
T* dy_;
const size_t cols_;
};

}  // namespace operators
}  // namespace paddle
...
paddle/operators/cos_sim_op.cu
@@ -15,6 +15,51 @@
#define EIGEN_USE_GPU
#include "paddle/operators/cos_sim_op.h"

namespace paddle {
namespace operators {
template <typename T>
struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
const T* z, const T* dz, T* dy, int cols)
: x_norm_(x_norm),
y_norm_(y_norm),
x_(x),
y_(y),
z_(z),
dz_(dz),
dy_(dy),
cols_(static_cast<size_t>(cols)) {}
inline void operator()(size_t offset) const {
auto xy_norm_prod = x_norm_[offset] * y_norm_[0];
auto dz = dz_[offset];
auto z = z_[offset];
auto* x = x_ + cols_ * offset;
auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
auto y_norm_square = y_norm_[0] * y_norm_[0];
auto reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols_; ++i) {
T dy = dz * (x[i] * reciprocal_xy_norm_prod -
z * y_[i] * reciprocal_y_norm_square);
      paddle::paddleAtomicAdd(dy_ + i, dy);
}
}
const T* x_norm_;
const T* y_norm_;
const T* x_;
const T* y_;
const T* z_;
const T* dz_;
T* dy_;
const size_t cols_;
};
} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    cos_sim, ops::CosSimKernel<paddle::platform::CUDADeviceContext, float>);
...
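When y is broadcast (one y row shared by every x row), all rows write their dY contribution into the same buffer: the CPU specialization accumulates with a plain +=, while the CUDA specialization must use paddle::paddleAtomicAdd because rows are processed in parallel. A small host-side sketch of that accumulation (illustrative helper, not a Paddle API):

#include <cstddef>
#include <vector>

// Sums per-row dY contributions into a single broadcast dy buffer.
// Run serially, a plain += is safe; with one thread per row (as in the
// CUDA path above), the same += on shared dy[i] would race and has to
// become an atomic add.
void AccumulateBroadcastDy(const std::vector<float>& per_row_dy, size_t rows,
                           size_t cols, float* dy /* length cols, zeroed */) {
  for (size_t r = 0; r < rows; ++r) {
    for (size_t i = 0; i < cols; ++i) {
      dy[i] += per_row_dy[r * cols + i];
    }
  }
}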
paddle/operators/cos_sim_op.h
@@ -21,10 +21,17 @@ namespace operators {
 using Tensor = framework::Tensor;
 
-template <typename IT1, typename IT2, typename Callback>
-static void ForEachZip(IT1 begin1, IT1 last1, IT2 begin2, Callback callback) {
-  for (; begin1 < last1; ++begin1, ++begin2) {
-    callback(*begin1, *begin2);
+template <typename DeviceContext, typename T>
+struct CosSimDyFunctor {
+  CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
+                  const T* z, const T* dz, T* dy, int cols);
+  inline void operator()(size_t) const;
+};
+
+template <typename Callback>
+static void ForEachZip(size_t num, Callback callback) {
+  for (size_t i = 0; i < num; ++i) {
+    callback(i);
   }
 }
@@ -38,16 +45,11 @@ struct CosSimFunctor {
         z_(z),
         cols_(static_cast<size_t>(cols)) {}
 
-  inline void operator()(T& x_norm, T& y_norm) const {
-    size_t x_offset = &x_norm - x_norm_;
-    size_t y_offset = &y_norm - y_norm_;
-
-    auto* x = x_ + cols_ * x_offset;
-    T xx = 0, xy = 0;
-    T yy = 0;
+  inline HOSTDEVICE void operator()(size_t offset) const {
+    auto* x = x_ + cols_ * offset;
+    T xx = 0, xy = 0, yy = 0;
     if (same_row) {
-      auto* y = y_ + cols_ * y_offset;
+      auto* y = y_ + cols_ * offset;
       for (size_t i = 0; i < cols_; ++i) {
         xx += x[i] * x[i];
         yy += y[i] * y[i];
@@ -55,21 +57,20 @@ struct CosSimFunctor {
       }
       xx = sqrt(xx);
       yy = sqrt(yy);
-      x_norm_[x_offset] = xx;
-      y_norm_[y_offset] = yy;
-      z_[x_offset] = xy / (xx * yy);
+      y_norm_[offset] = yy;
+      x_norm_[offset] = xx;
+      z_[offset] = xy / (xx * yy);
     } else {  // This can be written in a better way.
-      auto* y = y_;
       for (size_t i = 0; i < cols_; ++i) {
         xx += x[i] * x[i];
-        yy += y[i] * y[i];  // only need
-        xy += x[i] * y[i];
+        yy += y_[i] * y_[i];  // only need
+        xy += x[i] * y_[i];
       }
       xx = sqrt(xx);
       yy = sqrt(yy);
-      x_norm_[x_offset] = xx;
       y_norm_[0] = yy;
-      z_[x_offset] = xy / (xx * yy);
+      x_norm_[offset] = xx;
+      z_[offset] = xy / (xx * yy);
     }
   }
@@ -104,14 +105,12 @@ class CosSimKernel : public framework::OpKernel<T> {
       CosSimFunctor<T, true> functor(
           in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(),
           out_y_norm->data<T>(), out_z->data<T>(), cols);
-      ForEachZip(out_x_norm->data<T>(), out_x_norm->data<T>() + rows_x,
-                 out_y_norm->data<T>(), functor);
+      ForEachZip(rows_x, functor);
     } else {
       CosSimFunctor<T, false> functor(
           in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(),
           out_y_norm->data<T>(), out_z->data<T>(), cols);
-      ForEachZip(out_x_norm->data<T>(), out_x_norm->data<T>() + rows_x,
-                 out_y_norm->data<T>(), functor);
+      ForEachZip(rows_x, functor);
     }
   }
 };
@@ -129,19 +128,15 @@ struct CosSimGradFunctor {
         dx_(dx),
         cols_(static_cast<size_t>(cols)) {}
 
-  inline void operator()(const T& x_norm, const T& y_norm) const {
-    size_t x_offset = &x_norm - x_norm_;
-    size_t y_offset = &y_norm - y_norm_;
-
-    auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset];
-    auto xy_norm_prod = x_norm_[x_offset] * y_norm_[y_offset];
-    auto dz = dz_[x_offset];
-    auto z = z_[x_offset];
-
-    auto* dx = dx_ + cols_ * x_offset;
-    auto* x = x_ + cols_ * x_offset;
-    auto* y = y_ + cols_ * y_offset;
+  inline HOSTDEVICE void operator()(size_t offset) const {
+    auto x_norm_square = x_norm_[offset] * x_norm_[offset];
+    auto xy_norm_prod = x_norm_[offset] * y_norm_[offset];
+    auto dz = dz_[offset];
+    auto z = z_[offset];
+    auto* dx = dx_ + cols_ * offset;
+    auto* x = x_ + cols_ * offset;
+    auto* y = y_ + cols_ * offset;
 
     auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
     auto reciprocal_x_norm_square = 1 / x_norm_square;
@@ -161,10 +156,10 @@ struct CosSimGradFunctor {
   const size_t cols_;
 };
 
-template <typename T, bool Dx>
+template <typename T>
 struct CosSimDxFunctor {
   CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y,
-                  const T* z, const T* dz, T* dx, T* dy, int cols)
+                  const T* z, const T* dz, T* dx, int cols)
       : x_norm_(x_norm),
         y_norm_(y_norm),
         x_(x),
@@ -172,37 +167,23 @@ struct CosSimDxFunctor {
         z_(z),
         dz_(dz),
         dx_(dx),
-        dy_(dy),
         cols_(static_cast<size_t>(cols)) {}
 
-  inline void operator()(const T& x_norm, const T& y_norm) const {
-    size_t x_offset = &x_norm - x_norm_;
-
-    auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0];
-    auto dz = dz_[x_offset];
-    auto z = z_[x_offset];
-    auto* x = x_ + cols_ * x_offset;
-
+  inline HOSTDEVICE void operator()(size_t offset) const {
+    auto xy_norm_prod = x_norm_[offset] * y_norm_[0];
+    auto dz = dz_[offset];
+    auto z = z_[offset];
+    auto* x = x_ + cols_ * offset;
     auto reciprocal_xy_norm_prod = 1 / xy_norm_prod;
-
-    if (Dx) {
-      auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset];
-      auto* dx = dx_ + cols_ * x_offset;
-      auto* x = x_ + cols_ * x_offset;
-      auto reciprocal_x_norm_square = 1 / x_norm_square;
-      for (size_t i = 0; i < cols_; ++i) {
-        dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod -
-                      z * x[i] * reciprocal_x_norm_square);
-      }
-    } else {
-      auto y_norm_square = y_norm_[0] * y_norm_[0];
-      auto reciprocal_y_norm_square = 1 / y_norm_square;
-      for (size_t i = 0; i < cols_; ++i) {
-        dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod -
-                        z * y_[i] * reciprocal_y_norm_square);
-      }
+    auto x_norm_square = x_norm_[offset] * x_norm_[offset];
+    auto* dx = dx_ + cols_ * offset;
+    auto reciprocal_x_norm_square = 1 / x_norm_square;
+    for (size_t i = 0; i < cols_; ++i) {
+      dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod -
+                    z * x[i] * reciprocal_x_norm_square);
     }
   }
+
   const T* x_norm_;
   const T* y_norm_;
   const T* x_;
@@ -210,7 +191,6 @@ struct CosSimDxFunctor {
   const T* z_;
   const T* dz_;
   T* dx_;
-  T* dy_;
   const size_t cols_;
 };
@@ -239,33 +219,34 @@ class CosSimGradKernel : public framework::OpKernel<T> {
             in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
             in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
             out_grad_x->mutable_data<T>(context.GetPlace()), cols);
-        ForEachZip(in_x_norm->data<T>(), in_x_norm->data<T>() + rows_x,
-                   in_y_norm->data<T>(), functor);
+        ForEachZip(rows_x, functor);
       }
       if (out_grad_y) {
         CosSimGradFunctor<T> functor(
             in_y_norm->data<T>(), in_x_norm->data<T>(), in_y->data<T>(),
             in_x->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
             out_grad_y->mutable_data<T>(context.GetPlace()), cols);
-        ForEachZip(in_y_norm->data<T>(), in_y_norm->data<T>() + rows_x,
-                   in_x_norm->data<T>(), functor);
+        ForEachZip(rows_x, functor);
       }
     } else {
       if (out_grad_x) {
-        CosSimDxFunctor<T, true> functor(
+        CosSimDxFunctor<T> functor(
            in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
            in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
-            out_grad_x->mutable_data<T>(context.GetPlace()), nullptr, cols);
-        ForEachZip(in_x_norm->data<T>(), in_x_norm->data<T>() + rows_x,
-                   in_y_norm->data<T>(), functor);
+            out_grad_x->mutable_data<T>(context.GetPlace()), cols);
+        ForEachZip(rows_x, functor);
       }
       if (out_grad_y) {
-        CosSimDxFunctor<T, false> functor(
+        out_grad_y->mutable_data<T>(context.GetPlace());
+        math::SetConstant<DeviceContext, T> set_zero;
+        auto& dev_ctx = context.template device_context<DeviceContext>();
+        set_zero(dev_ctx, out_grad_y, static_cast<T>(0));
+        CosSimDyFunctor<DeviceContext, T> functor(
             in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
-            in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(), nullptr,
-            out_grad_y->mutable_data<T>(context.GetPlace()), cols);
-        ForEachZip(in_x_norm->data<T>(), in_x_norm->data<T>() + rows_x,
-                   in_y_norm->data<T>(), functor);
+            in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
+            out_grad_y->data<T>(), cols);
+        ForEachZip(rows_x, functor);
       }
     }
   }
...
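For reference, these functors implement the analytic gradient of cosine similarity. With z = x·y / (||x|| ||y||), the per-element derivatives, each multiplied by the incoming dz (and dy additionally summed over rows when y is broadcast), are:

\frac{\partial z}{\partial x_i} = \frac{y_i}{\lVert x\rVert\,\lVert y\rVert} - \frac{z\,x_i}{\lVert x\rVert^{2}},
\qquad
\frac{\partial z}{\partial y_i} = \frac{x_i}{\lVert x\rVert\,\lVert y\rVert} - \frac{z\,y_i}{\lVert y\rVert^{2}},

which corresponds to the reciprocal_xy_norm_prod, reciprocal_x_norm_square, and reciprocal_y_norm_square arithmetic in CosSimDxFunctor and CosSimDyFunctor above.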