diff --git a/lite/kernels/x86/fc_compute.h b/lite/kernels/x86/fc_compute.h index 9c337fa0781e0251bcd7f5cf608f868ffecad32b..886be9c5c3385317c9f1fc64b6cfe7070b2cdeef 100644 --- a/lite/kernels/x86/fc_compute.h +++ b/lite/kernels/x86/fc_compute.h @@ -135,13 +135,13 @@ class FcCompute : public KernelLite { bool with_relu = (param.activation_type == "relu") ? true : false; bool padding_weights = param.padding_weights; - auto& w_dims = w->dims(); + const auto& w_dims = w->dims(); auto w_dims0 = padding_weights ? w_dims[0] - 4 : w_dims[0]; auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1]; DDim out_dims; out_dims.resize(static_cast<size_t>(in_num_col_dims + 1)); - auto& in_dims = input->dims(); + const auto& in_dims = input->dims(); for (int i = 0; i < in_num_col_dims; ++i) { out_dims[i] = in_dims[i]; } diff --git a/lite/operators/gru_op.cc b/lite/operators/gru_op.cc index 55935dd7a49ceb98d80fd897be4d0585ca4e35af..a9c2f81255ee448f754c55cc77c822f8c60485bb 100644 --- a/lite/operators/gru_op.cc +++ b/lite/operators/gru_op.cc @@ -28,8 +28,8 @@ bool GRUOpLite::CheckShape() const { CHECK_OR_FALSE(param_.batch_hidden) CHECK_OR_FALSE(param_.hidden) - auto input_dims = param_.input->dims(); - auto weight_dims = param_.weight->dims(); + const auto &input_dims = param_.input->dims(); + const auto &weight_dims = param_.weight->dims(); int input_size = input_dims[1]; int frame_size = weight_dims[0]; CHECK_EQ_OR_FALSE(input_size, frame_size * 3) @@ -52,14 +52,14 @@ bool GRUOpLite::CheckShape() const { } bool GRUOpLite::InferShape() const { - auto &input_dims = param_.input->dims(); - auto &weight_dims = param_.weight->dims(); + const auto &input_dims = param_.input->dims(); + const auto &weight_dims = param_.weight->dims(); int frame_size = weight_dims[0]; auto batch_size = input_dims[0]; param_.batch_gate->Resize(input_dims); - auto out_dims = DDim({batch_size, frame_size}); + DDim out_dims({batch_size, frame_size}); param_.batch_reset_hidden_prev->Resize(out_dims); 
param_.batch_hidden->Resize(out_dims); param_.hidden->Resize(out_dims); diff --git a/lite/operators/gru_unit_op.cc b/lite/operators/gru_unit_op.cc index 00a03e3c6f4b1d095c0a044d3e7e54eff4116362..1edea5c62643345c74db04129f153da7d34e20c1 100644 --- a/lite/operators/gru_unit_op.cc +++ b/lite/operators/gru_unit_op.cc @@ -28,9 +28,9 @@ bool GRUUnitOpLite::CheckShape() const { CHECK_OR_FALSE(param_.hidden); CHECK_OR_FALSE(param_.weight); - auto input_dims = param_.input->dims(); - auto hidden_prev_dims = param_.hidden_prev->dims(); - auto weight_dims = param_.weight->dims(); + const auto &input_dims = param_.input->dims(); + const auto &hidden_prev_dims = param_.hidden_prev->dims(); + const auto &weight_dims = param_.weight->dims(); int input_size = input_dims[1]; int frame_size = hidden_prev_dims[1]; @@ -52,8 +52,8 @@ bool GRUUnitOpLite::CheckShape() const { } bool GRUUnitOpLite::InferShape() const { - auto input_dims = param_.input->dims(); - auto hidden_prev_dims = param_.hidden_prev->dims(); + const auto &input_dims = param_.input->dims(); + const auto &hidden_prev_dims = param_.hidden_prev->dims(); int batch_size = input_dims[0]; int frame_size = hidden_prev_dims[1]; diff --git a/lite/operators/instance_norm_op.cc b/lite/operators/instance_norm_op.cc index 261b647721a1647664b74bc066e3d8b49185625d..09051b2314038502cd856e64c8eba977a18a36f3 100644 --- a/lite/operators/instance_norm_op.cc +++ b/lite/operators/instance_norm_op.cc @@ -43,12 +43,10 @@ bool InstanceNormOp::CheckShape() const { } bool InstanceNormOp::InferShape() const { - auto x_dims = param_.x->dims(); - int64_t batch_size = x_dims[0]; - int64_t channel_size = x_dims[1]; - param_.saved_mean->Resize(std::vector<int64_t>({batch_size * channel_size})); - param_.saved_variance->Resize( - std::vector<int64_t>({batch_size * channel_size})); + const auto& x_dims = param_.x->dims(); + DDim saved_dims({x_dims[0] * x_dims[1]}); // batch_size * channel_size + param_.saved_mean->Resize(saved_dims); + 
param_.saved_variance->Resize(saved_dims); param_.out->Resize(x_dims); return true; } diff --git a/lite/operators/lookup_table_op.cc b/lite/operators/lookup_table_op.cc index 98f8062294f3606542ac110e7a838a93ffeda0cf..6ee37ebb512c569f3f945158714733f2940678ec 100644 --- a/lite/operators/lookup_table_op.cc +++ b/lite/operators/lookup_table_op.cc @@ -25,8 +25,8 @@ bool LookupTableOpLite::CheckShape() const { CHECK_OR_FALSE(param_.Ids) CHECK_OR_FALSE(param_.Out) - auto table_dims = param_.W->dims(); - auto ids_dims = param_.Ids->dims(); + const auto &table_dims = param_.W->dims(); + const auto &ids_dims = param_.Ids->dims(); int ids_rank = ids_dims.size(); @@ -37,8 +37,8 @@ bool LookupTableOpLite::CheckShape() const { } bool LookupTableOpLite::InferShape() const { - auto &table_dims = param_.W->dims(); - auto &ids_dims = param_.Ids->dims(); + const auto &table_dims = param_.W->dims(); + const auto &ids_dims = param_.Ids->dims(); auto out_dims = ids_dims; int ids_rank = ids_dims.size(); diff --git a/lite/operators/match_matrix_tensor_op.cc b/lite/operators/match_matrix_tensor_op.cc index e93b2d4451509a07ad413b841db8e8daa5e3cf24..09e6d5c0b17bc761b69380e70ba028cd221c60fb 100644 --- a/lite/operators/match_matrix_tensor_op.cc +++ b/lite/operators/match_matrix_tensor_op.cc @@ -45,8 +45,8 @@ bool MatchMatrixTensorOpLite::CheckShape() const { bool MatchMatrixTensorOpLite::InferShape() const { const Tensor* x = param_.x; const Tensor* y = param_.y; - DDim x_dims = param_.x->dims(); - DDim y_dims = param_.y->dims(); + const auto& x_dims = param_.x->dims(); + const auto& y_dims = param_.y->dims(); int dim_t = param_.dim_t; const auto& x_lod = x->lod(); diff --git a/lite/operators/reduce_ops.cc b/lite/operators/reduce_ops.cc index ba2e3dadc12c84d4dfca1f23133864e05876ceeb..ffacdf2a9086db859f25bfe3f4e04d6533665bff 100644 --- a/lite/operators/reduce_ops.cc +++ b/lite/operators/reduce_ops.cc @@ -29,11 +29,13 @@ bool ReduceOp::CheckShape() const { } bool ReduceOp::InferShape() const { - 
auto& x_dims = param_.x->dims(); + const auto& x_dims = param_.x->dims(); auto x_rank = x_dims.size(); - auto& dims = param_.dim; + auto dims = param_.dim; for (size_t i = 0; i < dims.size(); ++i) { - if (dims[i] < 0) dims[i] = x_rank + dims[i]; + if (dims[i] < 0) { + dims[i] = x_rank + dims[i]; + } CHECK_LT(dims[i], x_rank) << "The dim should be in the range [-rank(input), rank(input)."; } @@ -58,7 +60,7 @@ bool ReduceOp::InferShape() const { int dim_index = 0; int out_index = 0; for (size_t i = 0; i < x_rank; ++i) { - if (dims[dim_index] == i) { + if (dims[dim_index] == static_cast<int>(i)) { dim_index++; } else { out_dims[out_index++] = x_dims[i]; diff --git a/lite/operators/reshape_op.cc b/lite/operators/reshape_op.cc index c69b592364048186449694fa061829de592d2f9c..d82e32f349201d7b4d9c7dcc1e2603e8a6e3c1c3 100644 --- a/lite/operators/reshape_op.cc +++ b/lite/operators/reshape_op.cc @@ -27,11 +27,11 @@ bool ReshapeOp::CheckShape() const { } bool ReshapeOp::InferShape() const { - auto &shape_tensor_vct = param_.shape_tensor_vct; + const auto &shape_tensor_vct = param_.shape_tensor_vct; auto *shape_tensor = param_.shape_tensor; - auto &shape_vct = param_.shape_vct; - std::vector final_shape; + const auto &shape_vct = param_.shape_vct; + std::vector final_shape; if (shape_tensor_vct.size() > 0) { final_shape.resize(shape_tensor_vct.size()); for (int i = 0; i < shape_tensor_vct.size(); i++) { @@ -47,7 +47,7 @@ bool ReshapeOp::InferShape() const { LOG(FATAL) << "input shape error"; } - auto &x_dims = param_.x->dims(); + const auto &x_dims = param_.x->dims(); auto output_dims = ValidateShape(final_shape, x_dims); param_.output->Resize(output_dims); auto out_lod = param_.output->mutable_lod(); @@ -99,7 +99,7 @@ bool Reshape2Op::CheckShape() const { bool Reshape2Op::InferShape() const { ReshapeOp::InferShape(); - auto &x_dims = param_.x->dims(); + const auto &x_dims = param_.x->dims(); DDim xshape_dims; xshape_dims.resize(x_dims.size() + 1); xshape_dims[0] = 0;