diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc
index c033af3b741ae26ad9d37b2164f87aa6e8651c6e..3d0af97aae79089d10d94cb636775c5c6c092d56 100644
--- a/paddle/operators/cos_sim_op.cc
+++ b/paddle/operators/cos_sim_op.cc
@@ -25,16 +25,30 @@ class CosSimOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    // notnull check
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
-    PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
-                      ctx.Input<Tensor>("Y")->dims(),
-                      "Dimensions of Input(X) and Input(Y) must be the same.");
-
-    auto dims = ctx.Input<Tensor>("X")->dims();
-    ctx.Output<Tensor>("Out")->Resize({dims[0], 1});
-    ctx.Output<Tensor>("XNorm")->Resize({dims[0], 1});
-    ctx.Output<Tensor>("YNorm")->Resize({dims[0], 1});
+
+    // shape check
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto y_dims = ctx.Input<Tensor>("Y")->dims();
+
+    PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
+                      "Ranks of Input(X) and Input(Y) must be equal.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      "Rank of Input(X) must not be less than 2.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
+                      framework::slice_ddim(y_dims, 1, y_dims.size()),
+                      "All dimensions except the 1st of Input(X) and Input(Y) "
+                      "must be equal.");
+    PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
+                   "The 1st dimension of Input(Y) must be equal to Input(X) or"
+                   " just 1 (which will be broadcasted to match Input(X)).");
+
+    // resize tensor
+    ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.Output<Tensor>("XNorm")->Resize({x_dims[0], 1});
+    ctx.Output<Tensor>("YNorm")->Resize({y_dims[0], 1});
   }
 };
@@ -42,16 +56,27 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The first input of cos_sim op.");
-    AddInput("Y", "The second input of cos_sim op.");
+    AddInput("X", "The 1st input of cos_sim op.");
+    AddInput("Y", "The 2nd input of cos_sim op.");
     AddOutput("Out", "The output of cos_sim op.");
-    AddOutput("XNorm", "Row norm of the first input.").AsIntermediate();
-    AddOutput("YNorm", "Row norm of the second input.").AsIntermediate();
+    AddOutput("XNorm",
+              "Norm of the first input, reduced along the 1st "
+              "dimension.")
+        .AsIntermediate();
+    AddOutput("YNorm",
+              "Norm of the second input, reduced along the 1st "
+              "dimension.")
+        .AsIntermediate();
     AddComment(R"DOC(
 Cosine Similarity Operator.
 
-The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y))
+The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
+
+Input(X) and Input(Y) must have the same shape, except that the 1st dimension
+of Input(Y) could be just 1 (different from Input(X)), which will be
+broadcasted to match the shape of Input(X) before computing their cosine
+similarity.
 )DOC");
   }
 };
@@ -62,32 +87,50 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    // notnull check
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"),
                             "Input(XNorm) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"),
                             "Input(YNorm) must not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"),
+                            "Input(Out) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) must not be null.");
 
+    // shape check
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     auto xnorm_dims = ctx.Input<Tensor>("XNorm")->dims();
     auto ynorm_dims = ctx.Input<Tensor>("YNorm")->dims();
-    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                      "Dimensions of Input(X) and Input(Y) must be the same.");
-    PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0],
-                      "1st dimension of XNorm must equal that of Input(X).");
-    PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2st dimension of XNorm must be one.");
-    PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0],
-                      "1st dimension of YNorm must equal that of Input(Y).");
-    PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2st dimension of YNorm must be one.");
-    PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
-                      "1st dimension of Out@GRAD must equal that of Input(X)");
-    PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one.");
-
+    auto out_dims = ctx.Input<Tensor>("Out")->dims();
+    auto out_grad_dims =
+        ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
+
+    PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
+                      "Ranks of Input(X) and Input(Y) must be equal.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      "Rank of Input(X) must not be less than 2.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
+                      framework::slice_ddim(y_dims, 1, y_dims.size()),
+                      "All dimensions except the 1st of Input(X) and Input(Y) "
+                      "must be equal.");
+    PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
+                   "The 1st dimension of Input(Y) must be equal to Input(X) or"
+                   " just 1 (which will be broadcasted to match Input(X)).");
+    auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1});
+    auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1});
+    PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims,
+                      "Shape of Input(XNorm) must be [X.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims,
+                      "Shape of Input(YNorm) must be [Y.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims,
+                      "Shape of Input(Out) must be [X.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims,
+                      "Shape of Input(Out@Grad) must be [X.Dim(0), 1].");
+
+    // resize tensor
     auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
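For reference, here is a minimal NumPy sketch of the semantics the new `InferShape` and doc comment describe (not part of the patch; `cos_sim_forward` is a made-up helper): inputs are flattened to `(rows, cols)`, norms are reduced over everything but the 1st dimension, and a `Y` whose 1st dimension is 1 is broadcast against every row of `X`.

```python
import numpy as np

def cos_sim_forward(x, y):
    """NumPy model of the cos_sim forward pass."""
    x2d = x.reshape(x.shape[0], -1)  # flatten trailing dims, like the kernel
    y2d = y.reshape(y.shape[0], -1)
    x_norm = np.linalg.norm(x2d, axis=1, keepdims=True)  # (rows_x, 1)
    y_norm = np.linalg.norm(y2d, axis=1, keepdims=True)  # (rows_y, 1)
    # NumPy broadcasting handles rows_y == 1 the same way the op does
    out = (x2d * y2d).sum(axis=1, keepdims=True) / x_norm / y_norm
    return out, x_norm, y_norm

out, _, _ = cos_sim_forward(np.random.rand(6, 5, 2),
                            np.random.rand(1, 5, 2))  # Y broadcast over 6 rows
assert out.shape == (6, 1)  # matches Resize({x_dims[0], 1})
```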
)DOC"); } }; @@ -62,32 +87,50 @@ class CosSimOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + // notnull check PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"), "Input(XNorm) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"), "Input(YNorm) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"), + "Input(Out) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) must not be null."); + // shape check auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto xnorm_dims = ctx.Input("XNorm")->dims(); auto ynorm_dims = ctx.Input("YNorm")->dims(); - auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - PADDLE_ENFORCE_EQ(x_dims, y_dims, - "Dimensions of Input(X) and Input(Y) must be the same."); - PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0], - "1st dimension of XNorm must equal that of Input(X)."); - PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2st dimension of XNorm must be one."); - PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0], - "1st dimension of YNorm must equal that of Input(Y)."); - PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2st dimension of YNorm must be one."); - PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], - "1st dimension of Out@GRAD must equal that of Input(X)"); - PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one."); - + auto out_dims = ctx.Input("Out")->dims(); + auto out_grad_dims = + ctx.Input(framework::GradVarName("Out"))->dims(); + + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Ranks of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_GE(x_dims.size(), 2, + "Rank of Input(X) must not be less than 2."); + PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()), + framework::slice_ddim(y_dims, 1, y_dims.size()), + "All dimensions except the 1st of Input(X) and Input(Y) " + "must be equal."); + PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, + "The 1st dimension of Input(Y) must be equal to Input(X) or" + " just 1 (which will be broadcasted to match Input(X))."); + auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1}); + auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1}); + PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims, + "Shape of Input(XNorm) must be [X.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims, + "Shape of Input(YNorm) must be [Y.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims, + "Shape of Input(Out) must be [X.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims, + "Shape of Input(Out@Grad) must be [X.Dim(0), 1]."); + + // resize tensor auto *x_grad = ctx.Output(framework::GradVarName("X")); auto *y_grad = ctx.Output(framework::GradVarName("Y")); if (x_grad) x_grad->Resize(x_dims); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 0dc509952578497671a128374f77ce616a520909..318b63f3707cf77755de773a39b00aa30d2296d3 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -31,30 +31,38 @@ template class CosSimKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input_x = context.Input("X"); - auto* input_y = context.Input("Y"); - auto* output_z = context.Output("Out"); - auto* output_x_norm = 
context.Output("XNorm"); - auto* output_y_norm = context.Output("YNorm"); + // get Tensor + auto* in_x = context.Input("X"); + auto* in_y = context.Input("Y"); + auto* out_z = context.Output("Out"); + auto* out_x_norm = context.Output("XNorm"); + auto* out_y_norm = context.Output("YNorm"); + out_z->mutable_data(context.GetPlace()); + out_x_norm->mutable_data(context.GetPlace()); + out_y_norm->mutable_data(context.GetPlace()); - output_z->mutable_data(context.GetPlace()); - output_x_norm->mutable_data(context.GetPlace()); - output_y_norm->mutable_data(context.GetPlace()); - - auto dims = input_x->dims(); - int64_t size = input_x->numel(); - auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); - auto x = EigenMatrix::From(*input_x, new_dims); - auto y = EigenMatrix::From(*input_y, new_dims); - auto z = EigenVector::Flatten(*output_z); - auto x_norm = EigenVector::Flatten(*output_x_norm); - auto y_norm = EigenVector::Flatten(*output_y_norm); + // convert Tensor to Eigen Tensor + int rows_x = in_x->dims()[0]; + int rows_y = in_y->dims()[0]; + auto x = EigenMatrix::Reshape(*in_x, 1); + auto y = EigenMatrix::Reshape(*in_y, 1); + auto z = EigenVector::Flatten(*out_z); + auto x_norm = EigenVector::Flatten(*out_x_norm); + auto y_norm = EigenVector::Flatten(*out_y_norm); + // compute auto place = context.GetEigenDevice(); - auto xy = (x * y).sum(Eigen::array({{1}})); - x_norm.device(place) = x.square().sum(Eigen::array({{1}})).sqrt(); - y_norm.device(place) = y.square().sum(Eigen::array({{1}})).sqrt(); - z.device(place) = xy / x_norm / y_norm; + auto row_along = Eigen::array({{1}}); + x_norm.device(place) = x.square().sum(row_along).sqrt(); + y_norm.device(place) = y.square().sum(row_along).sqrt(); + if (rows_x == rows_y) { + auto xy = (x * y).sum(Eigen::array({1})); + z.device(place) = xy / x_norm / y_norm; + } else { + Eigen::DSizes bcast(rows_x, 1); + auto xy = (x * y.broadcast(bcast)).sum(row_along); + z.device(place) = xy / x_norm / y_norm.broadcast(bcast); + } } }; @@ -62,43 +70,72 @@ template class CosSimGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input_x = context.Input("X"); - auto* input_y = context.Input("Y"); - auto* input_z = context.Input("Out"); - auto* input_x_norm = context.Input("XNorm"); - auto* input_y_norm = context.Input("YNorm"); - auto* output_grad_x = context.Output(framework::GradVarName("X")); - auto* output_grad_y = context.Output(framework::GradVarName("Y")); - auto* input_grad_z = context.Input(framework::GradVarName("Out")); + // get Tensor + auto* in_x = context.Input("X"); + auto* in_y = context.Input("Y"); + auto* in_z = context.Input("Out"); + auto* in_x_norm = context.Input("XNorm"); + auto* in_y_norm = context.Input("YNorm"); + auto* out_grad_x = context.Output(framework::GradVarName("X")); + auto* out_grad_y = context.Output(framework::GradVarName("Y")); + auto* in_grad_z = context.Input(framework::GradVarName("Out")); - auto dims = input_x->dims(); - int64_t size = input_x->numel(); - auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); - auto x = EigenMatrix::From(*input_x, new_dims); - auto y = EigenMatrix::From(*input_y, new_dims); - auto z = EigenMatrix::From(*input_z); - auto x_norm = EigenMatrix::From(*input_x_norm); - auto y_norm = EigenMatrix::From(*input_y_norm); - auto dz = EigenMatrix::From(*input_grad_z); + // convert Tensor to Eigen Tensor + auto x = EigenMatrix::Reshape(*in_x, 1); + auto y = EigenMatrix::Reshape(*in_y, 1); + auto z = 
diff --git a/paddle/operators/onehot_cross_entropy_op.cc b/paddle/operators/onehot_cross_entropy_op.cc
index d2362f7dd5b50d0ad351b55694129904b8980d81..29889926490cbe8448c8ddeecec51832af16c89e 100644
--- a/paddle/operators/onehot_cross_entropy_op.cc
+++ b/paddle/operators/onehot_cross_entropy_op.cc
@@ -29,7 +29,7 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
     PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
     PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
-    ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
+    ctx.Output<Tensor>("Y")->Resize({X->dims()[0], 1});
   }
 };
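Side note on the `onehot_cross_entropy` change: the loss output becomes a column vector of shape `(batch_size, 1)` instead of a flat `(batch_size,)`, matching the `[N, 1]` convention the cos_sim outputs use above. Roughly, in NumPy terms (illustrative only):

```python
import numpy as np

X = np.random.uniform(0.1, 1.0, (4, 10)).astype("float32")
labels = np.random.randint(0, 10, 4)
# per-example loss is now a column vector (batch_size, 1)
loss = -np.log(X[np.arange(4), labels]).reshape(-1, 1)
```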
diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/framework/tests/test_cos_sim_op.py
index 797cbd8cc5cf7f73d58ca713d02667731d5c8a0e..d314ce391ea2f10a8bd77c24e84fa3e1eebb6c73 100644
--- a/python/paddle/v2/framework/tests/test_cos_sim_op.py
+++ b/python/paddle/v2/framework/tests/test_cos_sim_op.py
@@ -7,8 +7,8 @@ class TestCosSimOp(OpTest):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((10, 5)).astype("float32"),
-            'Y': np.random.random((10, 5)).astype("float32")
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((6, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -28,12 +28,66 @@ class TestCosSimOp(OpTest):
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))
 
-    def test_check_grad_ignore_y(self):
+    def test_check_grad_ingore_y(self):
         self.check_grad(
             ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))
 
 
-if __name__ == "__main__":
+class TestCosSimOp2(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((1, 5)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+class TestCosSimOp3(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((6, 5, 2)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+class TestCosSimOp4(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((1, 5, 2)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+if __name__ == '__main__':
     unittest.main()
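TestCosSimOp3/4 exercise rank-3 inputs; note that the norm over `axis=(1, 2)` is exactly the row norm of the flattened `(rows, cols)` view that `EigenMatrix<T>::Reshape(*in_x, 1)` hands the kernel. A quick NumPy check of that equivalence:

```python
import numpy as np

x = np.random.rand(6, 5, 2)
a = np.linalg.norm(x, axis=(1, 2))               # norm over all trailing axes
b = np.linalg.norm(x.reshape(6, -1), axis=1)     # row norm of flattened view
assert np.allclose(a, b)
```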
diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
index c2fc102a8b8de82da5c3fc5fee273790325908f8..253e7b8a24465da63a7eacd7983eb831251e6230 100644
--- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py
+++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
@@ -8,20 +8,22 @@ class TestCrossEntropy(OpTest):
         self.op_type = "onehot_cross_entropy"
         batch_size = 30
         class_num = 10
+
         X = numpy.random.uniform(0.1, 1.0,
                                  [batch_size, class_num]).astype("float32")
-        label = (class_num / 2) * numpy.ones(batch_size).astype("int32")
-        self.inputs = {'X': X, 'label': label}
-        Y = []
-        for i in range(0, batch_size):
-            Y.append(-numpy.log(X[i][label[i]]))
-        self.outputs = {'Y': numpy.array(Y).astype("float32")}
+        labels = numpy.random.randint(0, class_num, batch_size, dtype="int32")
+
+        cross_entropy = numpy.asmatrix(
+            [[-numpy.log(X[i][labels[i]])] for i in range(X.shape[0])],
+            dtype="float32")
+        self.inputs = {"X": X, "label": labels}
+        self.outputs = {"Y": cross_entropy}
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(["X"], "Y")
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py
index 456b765e331fc4c80e6fd817c88d7ec533158ecb..9052e63b5683801da7c73be4de23013c949add98 100644
--- a/python/paddle/v2/framework/tests/test_pad_op.py
+++ b/python/paddle/v2/framework/tests/test_pad_op.py
@@ -22,7 +22,7 @@ class TestPadOp(OpTest):
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', max_relative_error=0.006)
 
     def initTestCase(self):
         self.shape = (16, 16)
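On the `max_relative_error` values appearing in these tests: `check_grad` compares the operator's analytic gradient against a finite-difference estimate, which carries truncation and float32 rounding error, so an exact match is not achievable and a small relative tolerance is expected. A self-contained sketch of that style of check (the helpers below are illustrative, not OpTest's actual implementation):

```python
import numpy as np

def numeric_grad(f, x, delta=0.005):
    """Central-difference gradient of a scalar function f at x."""
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        orig = x[i]
        x[i] = orig + delta; fp = f(x)   # f(x + delta e_i)
        x[i] = orig - delta; fm = f(x)   # f(x - delta e_i)
        x[i] = orig
        g[i] = (fp - fm) / (2 * delta)
        it.iternext()
    return g

def max_relative_error(analytic, numeric, eps=1e-8):
    """Tolerances like 0.006 or 0.05 bound this quantity."""
    return np.max(np.abs(analytic - numeric) /
                  np.maximum(np.abs(analytic) + np.abs(numeric), eps))
```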