From 81e145764d870ed1a408e3fec4fc0d4d17e1bbec Mon Sep 17 00:00:00 2001
From: JiabinYang
Date: Mon, 26 Nov 2018 12:58:52 +0000
Subject: [PATCH] refine code and comments, test=develop

---
 paddle/fluid/operators/hierarchical_sigmoid_op.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.h b/paddle/fluid/operators/hierarchical_sigmoid_op.h
index 44853dafe9..f046fba7fc 100644
--- a/paddle/fluid/operators/hierarchical_sigmoid_op.h
+++ b/paddle/fluid/operators/hierarchical_sigmoid_op.h
@@ -30,14 +30,14 @@ template <typename T, int MajorType = Eigen::RowMajor,
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 using platform::Transform;
 
-std::vector<int64_t> cal_rows(const framework::LoDTensor* path) {
+std::vector<int64_t> cal_rows(const framework::LoDTensor& path) {
   std::set<int64_t> tmp;
   std::vector<int64_t> rows;
   rows.clear();
-  for (size_t i = 0; i < static_cast<size_t>(path->dims()[0]); i++) {
-    for (size_t j = 0; j < static_cast<size_t>(path->dims()[1]); j++) {
+  for (size_t i = 0; i < static_cast<size_t>(path.dims()[0]); i++) {
+    for (size_t j = 0; j < static_cast<size_t>(path.dims()[1]); j++) {
       int64_t temp =
-          path->data<int64_t>()[i * static_cast<size_t>(path->dims()[1]) + j];
+          path.data<int64_t>()[i * static_cast<size_t>(path.dims()[1]) + j];
       if (temp >= 0) {
         tmp.insert(temp);
       }
@@ -188,7 +188,7 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
       zero(dev_ctx, w_grad, static_cast<T>(0.0));
       bit_code->MulGradWeight(pre_out_grad, w_grad, *in);
     } else {
-      framework::Vector<int64_t> real_rows = cal_rows(path);
+      framework::Vector<int64_t> real_rows = cal_rows(*path);
       auto* w_grad =
           ctx.Output<framework::SelectedRows>(framework::GradVarName("W"));
       w_grad->set_rows(real_rows);
-- 
GitLab