Commit 05df5200 authored by Liu Yiqun

Change all references to const references.

Parent dea6154b
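The diff below swaps by-value `auto` bindings and mutable `auto &` bindings to shape accessors such as `dims()` for `const auto &`. Binding to a const reference avoids copying the returned dimension object where the old code copied it, and documents that the caller only reads the shape. The following standalone sketch shows the difference; `Shape` and `Blob` are illustrative stand-ins, not the Paddle-Lite `DDim` and tensor classes.

#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <vector>

// Illustrative stand-ins for DDim and a tensor-like type; only enough
// structure to make the copy observable.
struct Shape {
  static int copies;  // counts copy constructions for the demo
  std::vector<std::int64_t> d;

  Shape(std::initializer_list<std::int64_t> init) : d(init) {}
  Shape(const Shape& other) : d(other.d) { ++copies; }

  std::int64_t operator[](std::size_t i) const { return d[i]; }
};
int Shape::copies = 0;

struct Blob {
  Shape shape{128, 64};
  // Accessor returning a const reference, like dims() in the hunks below.
  const Shape& dims() const { return shape; }
};

int main() {
  Blob w;

  auto by_value = w.dims();       // deduces Shape: copies the whole object
  const auto& by_ref = w.dims();  // binds to the member: no copy made

  std::cout << "copies made: " << Shape::copies << "\n";  // prints 1
  std::cout << by_value[0] << " " << by_ref[1] << "\n";   // prints 128 64
  return 0;
}

Where a hunk already used `auto &`, the change only adds `const`, which costs nothing and prevents accidental mutation through that name.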
@@ -135,13 +135,13 @@ class FcCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     bool with_relu = (param.activation_type == "relu") ? true : false;
     bool padding_weights = param.padding_weights;
-    auto& w_dims = w->dims();
+    const auto& w_dims = w->dims();
     auto w_dims0 = padding_weights ? w_dims[0] - 4 : w_dims[0];
     auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1];
     DDim out_dims;
     out_dims.resize(static_cast<size_t>(in_num_col_dims + 1));
-    auto& in_dims = input->dims();
+    const auto& in_dims = input->dims();
     for (int i = 0; i < in_num_col_dims; ++i) {
       out_dims[i] = in_dims[i];
     }
@@ -28,8 +28,8 @@ bool GRUOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.batch_hidden)
   CHECK_OR_FALSE(param_.hidden)
-  auto input_dims = param_.input->dims();
-  auto weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &weight_dims = param_.weight->dims();
   int input_size = input_dims[1];
   int frame_size = weight_dims[0];
   CHECK_EQ_OR_FALSE(input_size, frame_size * 3)
@@ -52,14 +52,14 @@ bool GRUOpLite::CheckShape() const {
 }

 bool GRUOpLite::InferShape() const {
-  auto &input_dims = param_.input->dims();
-  auto &weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &weight_dims = param_.weight->dims();
   int frame_size = weight_dims[0];
   auto batch_size = input_dims[0];
   param_.batch_gate->Resize(input_dims);
-  auto out_dims = DDim({batch_size, frame_size});
+  DDim out_dims({batch_size, frame_size});
   param_.batch_reset_hidden_prev->Resize(out_dims);
   param_.batch_hidden->Resize(out_dims);
   param_.hidden->Resize(out_dims);
@@ -28,9 +28,9 @@ bool GRUUnitOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.hidden);
   CHECK_OR_FALSE(param_.weight);
-  auto input_dims = param_.input->dims();
-  auto hidden_prev_dims = param_.hidden_prev->dims();
-  auto weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &hidden_prev_dims = param_.hidden_prev->dims();
+  const auto &weight_dims = param_.weight->dims();
   int input_size = input_dims[1];
   int frame_size = hidden_prev_dims[1];
@@ -52,8 +52,8 @@ bool GRUUnitOpLite::CheckShape() const {
 }

 bool GRUUnitOpLite::InferShape() const {
-  auto input_dims = param_.input->dims();
-  auto hidden_prev_dims = param_.hidden_prev->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &hidden_prev_dims = param_.hidden_prev->dims();
   int batch_size = input_dims[0];
   int frame_size = hidden_prev_dims[1];
@@ -43,12 +43,10 @@ bool InstanceNormOp::CheckShape() const {
 }

 bool InstanceNormOp::InferShape() const {
-  auto x_dims = param_.x->dims();
-  int64_t batch_size = x_dims[0];
-  int64_t channel_size = x_dims[1];
-  param_.saved_mean->Resize(std::vector<int64_t>({batch_size * channel_size}));
-  param_.saved_variance->Resize(
-      std::vector<int64_t>({batch_size * channel_size}));
+  const auto& x_dims = param_.x->dims();
+  DDim saved_dims({x_dims[0] * x_dims[1]});  // batch_size * channel_size
+  param_.saved_mean->Resize(saved_dims);
+  param_.saved_variance->Resize(saved_dims);
   param_.out->Resize(x_dims);
   return true;
 }
@@ -25,8 +25,8 @@ bool LookupTableOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.Ids)
   CHECK_OR_FALSE(param_.Out)
-  auto table_dims = param_.W->dims();
-  auto ids_dims = param_.Ids->dims();
+  const auto &table_dims = param_.W->dims();
+  const auto &ids_dims = param_.Ids->dims();
   int ids_rank = ids_dims.size();
@@ -37,8 +37,8 @@ bool LookupTableOpLite::CheckShape() const {
 }

 bool LookupTableOpLite::InferShape() const {
-  auto &table_dims = param_.W->dims();
-  auto &ids_dims = param_.Ids->dims();
+  const auto &table_dims = param_.W->dims();
+  const auto &ids_dims = param_.Ids->dims();
   auto out_dims = ids_dims;
   int ids_rank = ids_dims.size();
@@ -45,8 +45,8 @@ bool MatchMatrixTensorOpLite::CheckShape() const {
 bool MatchMatrixTensorOpLite::InferShape() const {
   const Tensor* x = param_.x;
   const Tensor* y = param_.y;
-  DDim x_dims = param_.x->dims();
-  DDim y_dims = param_.y->dims();
+  const auto& x_dims = param_.x->dims();
+  const auto& y_dims = param_.y->dims();
   int dim_t = param_.dim_t;
   const auto& x_lod = x->lod();
@@ -29,11 +29,13 @@ bool ReduceOp::CheckShape() const {
 }

 bool ReduceOp::InferShape() const {
-  auto& x_dims = param_.x->dims();
+  const auto& x_dims = param_.x->dims();
   auto x_rank = x_dims.size();
-  auto& dims = param_.dim;
+  auto dims = param_.dim;
   for (size_t i = 0; i < dims.size(); ++i) {
-    if (dims[i] < 0) dims[i] = x_rank + dims[i];
+    if (dims[i] < 0) {
+      dims[i] = x_rank + dims[i];
+    }
     CHECK_LT(dims[i], x_rank)
         << "The dim should be in the range [-rank(input), rank(input).";
   }
@@ -58,7 +60,7 @@ bool ReduceOp::InferShape() const {
   int dim_index = 0;
   int out_index = 0;
   for (size_t i = 0; i < x_rank; ++i) {
-    if (dims[dim_index] == i) {
+    if (dims[dim_index] == static_cast<DDim::value_type>(i)) {
      dim_index++;
     } else {
       out_dims[out_index++] = x_dims[i];
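Two of the ReduceOp edits above change more than the qualifier. First, `param_.dim` is now copied into a local `dims` before negative axes are folded into positive ones, so the stored attribute is no longer rewritten as a side effect of `InferShape`. Second, the loop index `i` is cast to `DDim::value_type` so that it is compared with the stored axis in a single type rather than through an implicit conversion. A minimal sketch of the copy-versus-reference point, using plain standard-library types rather than the actual parameter structs:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for the op's parameter struct; `dim` mirrors the
// reduce-axis attribute read by ReduceOp, but this is not Paddle-Lite code.
struct Param {
  std::vector<int> dim{-1};
};

int main() {
  Param param;
  const int x_rank = 4;

  // By-value copy, as in the updated code: normalizing negative axes below
  // changes only the local vector, never the stored attribute.
  auto dims = param.dim;
  for (std::size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] < 0) {
      dims[i] = x_rank + dims[i];
    }
  }

  assert(dims[0] == 3);        // the local copy was normalized to axis 3
  assert(param.dim[0] == -1);  // the attribute keeps its original value
  return 0;
}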
@@ -27,11 +27,11 @@ bool ReshapeOp::CheckShape() const {
 }

 bool ReshapeOp::InferShape() const {
-  auto &shape_tensor_vct = param_.shape_tensor_vct;
+  const auto &shape_tensor_vct = param_.shape_tensor_vct;
   auto *shape_tensor = param_.shape_tensor;
-  auto &shape_vct = param_.shape_vct;
+  const auto &shape_vct = param_.shape_vct;
   std::vector<int> final_shape;
   if (shape_tensor_vct.size() > 0) {
     final_shape.resize(shape_tensor_vct.size());
     for (int i = 0; i < shape_tensor_vct.size(); i++) {
@@ -47,7 +47,7 @@ bool ReshapeOp::InferShape() const {
     LOG(FATAL) << "input shape error";
   }
-  auto &x_dims = param_.x->dims();
+  const auto &x_dims = param_.x->dims();
   auto output_dims = ValidateShape(final_shape, x_dims);
   param_.output->Resize(output_dims);
   auto out_lod = param_.output->mutable_lod();
@@ -99,7 +99,7 @@ bool Reshape2Op::CheckShape() const {
 bool Reshape2Op::InferShape() const {
   ReshapeOp::InferShape();
-  auto &x_dims = param_.x->dims();
+  const auto &x_dims = param_.x->dims();
   DDim xshape_dims;
   xshape_dims.resize(x_dims.size() + 1);
   xshape_dims[0] = 0;