diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.h b/paddle/fluid/operators/hierarchical_sigmoid_op.h
index 44853dafe9f4c117d51e0b6e86eae81bc1e6845b..f046fba7fc21951ce98e75bce5a89cfd7bb845ab 100644
--- a/paddle/fluid/operators/hierarchical_sigmoid_op.h
+++ b/paddle/fluid/operators/hierarchical_sigmoid_op.h
@@ -30,14 +30,14 @@ template <typename T, int MajorType = Eigen::RowMajor,
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 using platform::Transform;
 
-std::vector<int64_t> cal_rows(const framework::LoDTensor* path) {
+std::vector<int64_t> cal_rows(const framework::LoDTensor& path) {
   std::set<int64_t> tmp;
   std::vector<int64_t> rows;
   rows.clear();
-  for (size_t i = 0; i < static_cast<size_t>(path->dims()[0]); i++) {
-    for (size_t j = 0; j < static_cast<size_t>(path->dims()[1]); j++) {
+  for (size_t i = 0; i < static_cast<size_t>(path.dims()[0]); i++) {
+    for (size_t j = 0; j < static_cast<size_t>(path.dims()[1]); j++) {
       int64_t temp =
-          path->data<int64_t>()[i * static_cast<size_t>(path->dims()[1]) + j];
+          path.data<int64_t>()[i * static_cast<size_t>(path.dims()[1]) + j];
       if (temp >= 0) {
         tmp.insert(temp);
       }
@@ -188,7 +188,7 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
       zero(dev_ctx, w_grad, static_cast<T>(0.0));
       bit_code->MulGradWeight(pre_out_grad, w_grad, *in);
     } else {
-      framework::Vector<int64_t> real_rows = cal_rows(path);
+      framework::Vector<int64_t> real_rows = cal_rows(*path);
       auto* w_grad =
           ctx.Output<framework::SelectedRows>(framework::GradVarName("W"));
       w_grad->set_rows(real_rows);
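
Note on the change above: the patch switches cal_rows from taking framework::LoDTensor by raw pointer to taking it by const reference, so the one call site in HierarchicalSigmoidGradOpKernel dereferences the pointer once (cal_rows(*path)) and the function body drops the null-prone -> accesses. The following is a minimal standalone sketch of the same row-deduplication logic; the FakePath struct is an illustrative stand-in for framework::LoDTensor, not part of the Paddle API.

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Stand-in for framework::LoDTensor: a row-major [rows x cols] matrix of
// tree-node ids, where negative entries mark padding.
struct FakePath {
  std::vector<int64_t> data;
  int64_t rows;
  int64_t cols;
};

// Taking the argument by const reference, as the patch does, makes it explicit
// at the call site that the function neither accepts null nor takes ownership.
std::vector<int64_t> cal_rows(const FakePath& path) {
  std::set<int64_t> tmp;  // std::set keeps the ids unique and sorted
  for (int64_t i = 0; i < path.rows; ++i) {
    for (int64_t j = 0; j < path.cols; ++j) {
      int64_t id = path.data[i * path.cols + j];
      if (id >= 0) {  // skip padding entries
        tmp.insert(id);
      }
    }
  }
  return std::vector<int64_t>(tmp.begin(), tmp.end());
}

int main() {
  FakePath path{{3, 1, -1, 1, 0, 2}, 2, 3};
  for (int64_t row : cal_rows(path)) std::cout << row << " ";  // prints: 0 1 2 3
  std::cout << "\n";
  return 0;
}

In the real kernel the deduplicated ids become the row index of a SelectedRows gradient (w_grad->set_rows(real_rows)), so only the tree nodes actually touched by the batch receive weight-gradient rows.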