Commit a5f1e6d6 authored by Xinghai Sun

Update cos_sim operator by following reviewer's comments.

Parent 91215bce
@@ -90,8 +90,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
     auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    x_grad->Resize(x_dims);
-    y_grad->Resize(y_dims);
+    if (x_grad) x_grad->Resize(x_dims);
+    if (y_grad) y_grad->Resize(y_dims);
   }
 };
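Note: Resize is now guarded because either gradient output may be null, e.g. when the corresponding variable is listed in no_grad_set, as the updated tests below (test_ignore_x, test_ignore_y) exercise; unconditionally dereferencing a skipped gradient output would crash.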
......
@@ -28,30 +28,30 @@ template <typename Place, typename T>
 class CosSimKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* x = context.Input<Tensor>("X");
-    auto* y = context.Input<Tensor>("Y");
-    auto* z = context.Output<Tensor>("Out");
-    auto* x_norm = context.Output<Tensor>("XNorm");
-    auto* y_norm = context.Output<Tensor>("YNorm");
-    z->mutable_data<T>(context.GetPlace());
-    x_norm->mutable_data<T>(context.GetPlace());
-    y_norm->mutable_data<T>(context.GetPlace());
+    auto* input_x = context.Input<Tensor>("X");
+    auto* input_y = context.Input<Tensor>("Y");
+    auto* output_z = context.Output<Tensor>("Out");
+    auto* output_x_norm = context.Output<Tensor>("XNorm");
+    auto* output_y_norm = context.Output<Tensor>("YNorm");
+    output_z->mutable_data<T>(context.GetPlace());
+    output_x_norm->mutable_data<T>(context.GetPlace());
+    output_y_norm->mutable_data<T>(context.GetPlace());
 
-    auto dims = x->dims();
+    auto dims = input_x->dims();
     int size = static_cast<int>(framework::product(dims));
     auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
-    auto X = EigenMatrix<T>::From(*x, new_dims);
-    auto Y = EigenMatrix<T>::From(*y, new_dims);
-    auto Z = EigenMatrix<T>::From(*z);
-    auto XNorm = EigenMatrix<T>::From(*x_norm);
-    auto YNorm = EigenMatrix<T>::From(*y_norm);
+    auto x = EigenMatrix<T>::From(*input_x, new_dims);
+    auto y = EigenMatrix<T>::From(*input_y, new_dims);
+    auto z = EigenMatrix<T>::From(*output_z);
+    auto x_norm = EigenMatrix<T>::From(*output_x_norm);
+    auto y_norm = EigenMatrix<T>::From(*output_y_norm);
 
     auto place = context.GetEigenDevice<Place>();
-    auto XY = (X * Y).sum(Eigen::array<int, 1>({1}));
-    XNorm.device(place) = (X * X).sum(Eigen::array<int, 1>({1})).sqrt();
-    YNorm.device(place) = (Y * Y).sum(Eigen::array<int, 1>({1})).sqrt();
-    Z.device(place) = XY / XNorm / YNorm;
+    auto xy = (x * y).sum(Eigen::array<int, 1>({1}));
+    x_norm.device(place) = x.square().sum(Eigen::array<int, 1>({1})).sqrt();
+    y_norm.device(place) = y.square().sum(Eigen::array<int, 1>({1})).sqrt();
+    z.device(place) = xy / x_norm / y_norm;
   }
 };
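For reference, the forward kernel flattens each input to a dims[0] x (size / dims[0]) matrix and computes a row-wise cosine similarity; in LaTeX, for row i:

z_i = \frac{x_i \cdot y_i}{\lVert x_i \rVert \, \lVert y_i \rVert}
    = \frac{\sum_k x_{ik} y_{ik}}{\sqrt{\sum_k x_{ik}^2}\,\sqrt{\sum_k y_{ik}^2}}

XNorm and YNorm store \lVert x_i \rVert and \lVert y_i \rVert so the gradient kernel can reuse them instead of recomputing the norms.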
@@ -59,41 +59,44 @@ template <typename Place, typename T>
 class CosSimGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* x = context.Input<Tensor>("X");
-    auto* y = context.Input<Tensor>("Y");
-    auto* z = context.Input<Tensor>("Out");
-    auto* x_norm = context.Input<Tensor>("XNorm");
-    auto* y_norm = context.Input<Tensor>("YNorm");
-    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
-    auto* grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
-    auto* grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
-    grad_x->mutable_data<T>(context.GetPlace());
-    grad_y->mutable_data<T>(context.GetPlace());
+    auto* input_x = context.Input<Tensor>("X");
+    auto* input_y = context.Input<Tensor>("Y");
+    auto* input_z = context.Input<Tensor>("Out");
+    auto* input_x_norm = context.Input<Tensor>("XNorm");
+    auto* input_y_norm = context.Input<Tensor>("YNorm");
+    auto* output_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
+    auto* output_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
+    auto* input_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
 
-    auto dims = x->dims();
+    auto dims = input_x->dims();
     int size = static_cast<int>(framework::product(dims));
     auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
-    auto X = EigenMatrix<T>::From(*x, new_dims);
-    auto Y = EigenMatrix<T>::From(*y, new_dims);
-    auto Z = EigenMatrix<T>::From(*z);
-    auto X_norm = EigenMatrix<T>::From(*x_norm);
-    auto Y_norm = EigenMatrix<T>::From(*y_norm);
-    auto dX = EigenMatrix<T>::From(*grad_x, new_dims);
-    auto dY = EigenMatrix<T>::From(*grad_y, new_dims);
-    auto dZ = EigenMatrix<T>::From(*grad_z);
+    auto x = EigenMatrix<T>::From(*input_x, new_dims);
+    auto y = EigenMatrix<T>::From(*input_y, new_dims);
+    auto z = EigenMatrix<T>::From(*input_z);
+    auto x_norm = EigenMatrix<T>::From(*input_x_norm);
+    auto y_norm = EigenMatrix<T>::From(*input_y_norm);
+    auto dz = EigenMatrix<T>::From(*input_grad_z);
 
     Eigen::DSizes<int, 2> bcast(1, new_dims[1]);
-    auto Z_bcast = Z.broadcast(bcast);
-    auto dZ_bcast = dZ.broadcast(bcast);
+    auto z_bcast = z.broadcast(bcast);
+    auto dz_bcast = dz.broadcast(bcast);
     auto place = context.GetEigenDevice<Place>();
-    auto X_snorm_bcast = X_norm.square().eval().broadcast(bcast);
-    auto Y_snorm_bcast = Y_norm.square().eval().broadcast(bcast);
-    auto norm_prod_bcast = (X_norm * Y_norm).eval().broadcast(bcast);
-    dX.device(place) =
-        dZ_bcast * (Y / norm_prod_bcast - Z_bcast * X / X_snorm_bcast);
-    dY.device(place) =
-        dZ_bcast * (X / norm_prod_bcast - Z_bcast * Y / Y_snorm_bcast);
+    auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast);
+    auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast);
+    auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast);
+    if (output_grad_x) {
+      output_grad_x->mutable_data<T>(context.GetPlace());
+      auto dx = EigenMatrix<T>::From(*output_grad_x, new_dims);
+      dx.device(place) =
+          dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast);
+    }
+    if (output_grad_y) {
+      output_grad_y->mutable_data<T>(context.GetPlace());
+      auto dy = EigenMatrix<T>::From(*output_grad_y, new_dims);
+      dy.device(place) =
+          dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast);
+    }
   }
 };
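The two guarded assignments implement the analytic gradient of the row-wise cosine similarity. Differentiating z_i = (x_i \cdot y_i) / (\lVert x_i \rVert \lVert y_i \rVert) with respect to x_i (and symmetrically for y_i) gives, in LaTeX:

\frac{\partial z_i}{\partial x_{ik}}
  = \frac{y_{ik}}{\lVert x_i \rVert\,\lVert y_i \rVert}
  - z_i \frac{x_{ik}}{\lVert x_i \rVert^{2}}

which is exactly dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast): the per-row scalars z_i, dz_i, and the norms are broadcast across columns via bcast.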
......
@@ -24,26 +24,36 @@ class TestCosSimOp(unittest.TestCase):
         }
 
 
-class CosSimGradOpTest(GradientChecker):
-    def test_cos_sim_2d(self):
-        op = create_op("cos_sim")
-        inputs = {
+class TestCosSimGradOp(GradientChecker):
+    def setUp(self):
+        self.op = create_op("cos_sim")
+        self.inputs = {
             'X': np.random.random((10, 5)).astype("float32"),
             'Y': np.random.random((10, 5)).astype("float32")
         }
-        self.compare_grad(op, inputs)
+
+    def test_cpu_gpu_compare(self):
+        self.compare_grad(self.op, self.inputs)
+
+    def test_normal(self):
         self.check_grad(
-            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.05)
+            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
 
-    def test_cos_sim_3d(self):
-        op = create_op("cos_sim")
-        inputs = {
-            'X': np.random.random((10, 5, 2)).astype("float32"),
-            'Y': np.random.random((10, 5, 2)).astype("float32")
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(
-            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.05)
+    def test_ignore_x(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["Y"],
+            "Out",
+            max_relative_error=0.05,
+            no_grad_set={"X"})
+
+    def test_ignore_y(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["X"],
+            "Out",
+            max_relative_error=0.05,
+            no_grad_set={"Y"})
 
 
 if __name__ == '__main__':
......
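As a sanity check, here is a minimal NumPy sketch of what the forward kernel computes; the helper name cos_sim_reference is illustrative and not part of this change:

import numpy as np

def cos_sim_reference(x, y):
    # Mirror the kernel's reshape to (dims[0], size / dims[0]):
    # keep the first dimension, flatten the rest.
    x2 = x.reshape(x.shape[0], -1)
    y2 = y.reshape(y.shape[0], -1)
    x_norm = np.sqrt((x2 * x2).sum(axis=1))
    y_norm = np.sqrt((y2 * y2).sum(axis=1))
    # Row-wise cosine similarity, matching z = xy / x_norm / y_norm.
    z = (x2 * y2).sum(axis=1) / x_norm / y_norm
    return z, x_norm, y_norm

# Example with the 2-D shape used by the test inputs above.
x = np.random.random((10, 5)).astype("float32")
y = np.random.random((10, 5)).astype("float32")
z, x_norm, y_norm = cos_sim_reference(x, y)
assert z.shape == (10,)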