Commit 05df5200 authored by Liu Yiqun

Change all references to const references.

Parent: dea6154b
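The whole commit applies one pattern: `dims()` hands back a shape object, and binding the result to a plain `auto` local deduces a value type and copies it, while `const auto&` binds without copying. A minimal sketch of the difference, using a hypothetical `Dims`/`Tensor` pair with an instrumented copy constructor (not PaddleLite's real `DDim` or `Tensor`):

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Hypothetical stand-in for lite::DDim: a dimension vector plus a copy
// counter, so the cost difference is observable.
struct Dims {
  std::vector<int64_t> data;
  static int copies;
  Dims(std::initializer_list<int64_t> d) : data(d) {}
  Dims(const Dims& other) : data(other.data) { ++copies; }
};
int Dims::copies = 0;

// Hypothetical tensor exposing its shape by const reference, as the
// hunks below assume dims() does.
struct Tensor {
  Dims dims_{1, 3, 224, 224};
  const Dims& dims() const { return dims_; }
};

int main() {
  Tensor t;
  auto by_value = t.dims();       // deduces Dims: copies the whole vector
  const auto& by_ref = t.dims();  // deduces const Dims&: no copy at all
  std::printf("copies made: %d\n", Dims::copies);  // prints 1 (by_value)
  (void)by_value;
  (void)by_ref;
  return 0;
}
```

In shape-inference paths that can run on every invocation, skipping those copies is essentially free to do, and that is the substitution every hunk below makes.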
```diff
@@ -135,13 +135,13 @@ class FcCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     bool with_relu = (param.activation_type == "relu") ? true : false;
     bool padding_weights = param.padding_weights;
-    auto& w_dims = w->dims();
+    const auto& w_dims = w->dims();
     auto w_dims0 = padding_weights ? w_dims[0] - 4 : w_dims[0];
     auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1];
     DDim out_dims;
     out_dims.resize(static_cast<size_t>(in_num_col_dims + 1));
-    auto& in_dims = input->dims();
+    const auto& in_dims = input->dims();
     for (int i = 0; i < in_num_col_dims; ++i) {
       out_dims[i] = in_dims[i];
     }
...
```
```diff
@@ -28,8 +28,8 @@ bool GRUOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.batch_hidden)
   CHECK_OR_FALSE(param_.hidden)
-  auto input_dims = param_.input->dims();
-  auto weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &weight_dims = param_.weight->dims();
   int input_size = input_dims[1];
   int frame_size = weight_dims[0];
   CHECK_EQ_OR_FALSE(input_size, frame_size * 3)
@@ -52,14 +52,14 @@ bool GRUOpLite::CheckShape() const {
 }
 
 bool GRUOpLite::InferShape() const {
-  auto &input_dims = param_.input->dims();
-  auto &weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &weight_dims = param_.weight->dims();
   int frame_size = weight_dims[0];
   auto batch_size = input_dims[0];
   param_.batch_gate->Resize(input_dims);
-  auto out_dims = DDim({batch_size, frame_size});
+  DDim out_dims({batch_size, frame_size});
   param_.batch_reset_hidden_prev->Resize(out_dims);
   param_.batch_hidden->Resize(out_dims);
   param_.hidden->Resize(out_dims);
...
```
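One change in the `GRUOpLite::InferShape` hunk above goes beyond constness: `auto out_dims = DDim({batch_size, frame_size});` becomes direct initialization, `DDim out_dims({batch_size, frame_size});`. The copy-initialization form names a `DDim` temporary first, and elision of the move into `out_dims` is only guaranteed from C++17 on; constructing in place raises no such question. A short sketch under a hypothetical minimal `DDim`:

```cpp
#include <cstdint>
#include <initializer_list>
#include <vector>

// Hypothetical minimal DDim: constructible from a list of extents.
struct DDim {
  std::vector<int64_t> extents;
  DDim(std::initializer_list<int64_t> e) : extents(e) {}
};

int main() {
  int64_t batch_size = 8, frame_size = 16;

  // Copy-initialization: a DDim temporary is materialized on the right,
  // then out_dims_a is initialized from it (elided only since C++17).
  auto out_dims_a = DDim({batch_size, frame_size});

  // Direct initialization: out_dims_b is constructed in place.
  DDim out_dims_b({batch_size, frame_size});

  (void)out_dims_a;
  (void)out_dims_b;
  return 0;
}
```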
```diff
@@ -28,9 +28,9 @@ bool GRUUnitOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.hidden);
   CHECK_OR_FALSE(param_.weight);
-  auto input_dims = param_.input->dims();
-  auto hidden_prev_dims = param_.hidden_prev->dims();
-  auto weight_dims = param_.weight->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &hidden_prev_dims = param_.hidden_prev->dims();
+  const auto &weight_dims = param_.weight->dims();
   int input_size = input_dims[1];
   int frame_size = hidden_prev_dims[1];
@@ -52,8 +52,8 @@ bool GRUUnitOpLite::CheckShape() const {
 }
 
 bool GRUUnitOpLite::InferShape() const {
-  auto input_dims = param_.input->dims();
-  auto hidden_prev_dims = param_.hidden_prev->dims();
+  const auto &input_dims = param_.input->dims();
+  const auto &hidden_prev_dims = param_.hidden_prev->dims();
   int batch_size = input_dims[0];
   int frame_size = hidden_prev_dims[1];
...
```
```diff
@@ -43,12 +43,10 @@ bool InstanceNormOp::CheckShape() const {
 }
 
 bool InstanceNormOp::InferShape() const {
-  auto x_dims = param_.x->dims();
-  int64_t batch_size = x_dims[0];
-  int64_t channel_size = x_dims[1];
-  param_.saved_mean->Resize(std::vector<int64_t>({batch_size * channel_size}));
-  param_.saved_variance->Resize(
-      std::vector<int64_t>({batch_size * channel_size}));
+  const auto& x_dims = param_.x->dims();
+  DDim saved_dims({x_dims[0] * x_dims[1]});  // batch_size * channel_size
+  param_.saved_mean->Resize(saved_dims);
+  param_.saved_variance->Resize(saved_dims);
   param_.out->Resize(x_dims);
   return true;
 }
...
```
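This hunk also stops materializing a fresh `std::vector<int64_t>` for each `Resize` call: the `batch_size * channel_size` shape is built once into a named `DDim` and shared by both `saved_mean` and `saved_variance`. A sketch of the before/after under hypothetical minimal `DDim`/`Tensor` types (the real `Resize` overloads are whatever PaddleLite defines; both call forms appear in the diff above):

```cpp
#include <cstdint>
#include <initializer_list>
#include <vector>

// Hypothetical minimal shape and tensor types for illustration.
struct DDim {
  std::vector<int64_t> extents;
  DDim(std::initializer_list<int64_t> e) : extents(e) {}
  explicit DDim(const std::vector<int64_t>& v) : extents(v) {}
};
struct Tensor {
  DDim shape{0};
  void Resize(const DDim& d) { shape = d; }
  void Resize(const std::vector<int64_t>& v) { shape = DDim(v); }
};

int main() {
  Tensor saved_mean, saved_variance;
  int64_t batch_size = 2, channel_size = 16;

  // Before: each call builds its own temporary vector.
  saved_mean.Resize(std::vector<int64_t>({batch_size * channel_size}));
  saved_variance.Resize(std::vector<int64_t>({batch_size * channel_size}));

  // After: the shape is computed once and reused by const reference.
  DDim saved_dims({batch_size * channel_size});
  saved_mean.Resize(saved_dims);
  saved_variance.Resize(saved_dims);
  return 0;
}
```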
```diff
@@ -25,8 +25,8 @@ bool LookupTableOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.Ids)
   CHECK_OR_FALSE(param_.Out)
-  auto table_dims = param_.W->dims();
-  auto ids_dims = param_.Ids->dims();
+  const auto &table_dims = param_.W->dims();
+  const auto &ids_dims = param_.Ids->dims();
   int ids_rank = ids_dims.size();
@@ -37,8 +37,8 @@ bool LookupTableOpLite::CheckShape() const {
 }
 
 bool LookupTableOpLite::InferShape() const {
-  auto &table_dims = param_.W->dims();
-  auto &ids_dims = param_.Ids->dims();
+  const auto &table_dims = param_.W->dims();
+  const auto &ids_dims = param_.Ids->dims();
   auto out_dims = ids_dims;
   int ids_rank = ids_dims.size();
...
```
```diff
@@ -45,8 +45,8 @@ bool MatchMatrixTensorOpLite::CheckShape() const {
 bool MatchMatrixTensorOpLite::InferShape() const {
   const Tensor* x = param_.x;
   const Tensor* y = param_.y;
-  DDim x_dims = param_.x->dims();
-  DDim y_dims = param_.y->dims();
+  const auto& x_dims = param_.x->dims();
+  const auto& y_dims = param_.y->dims();
   int dim_t = param_.dim_t;
   const auto& x_lod = x->lod();
...
```
```diff
@@ -29,11 +29,13 @@ bool ReduceOp::CheckShape() const {
 }
 
 bool ReduceOp::InferShape() const {
-  auto& x_dims = param_.x->dims();
+  const auto& x_dims = param_.x->dims();
   auto x_rank = x_dims.size();
-  auto& dims = param_.dim;
+  auto dims = param_.dim;
   for (size_t i = 0; i < dims.size(); ++i) {
-    if (dims[i] < 0) dims[i] = x_rank + dims[i];
+    if (dims[i] < 0) {
+      dims[i] = x_rank + dims[i];
+    }
     CHECK_LT(dims[i], x_rank)
         << "The dim should be in the range [-rank(input), rank(input).";
   }
```
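One change in this hunk runs in the opposite direction from the rest of the commit: `auto &dims = param_.dim;` becomes the by-value `auto dims = param_.dim;`. The loop that follows rewrites negative axes in place (`dims[i] = x_rank + dims[i]`), so a reference would silently normalize the attribute stored on the op itself rather than a scratch copy. A minimal sketch of that hazard, with a hypothetical `Param` holding the persistent attribute:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical op parameter: 'dim' is an attribute that outlives one
// InferShape() call.
struct Param {
  std::vector<int> dim{-1};
};

int main() {
  Param param;
  int x_rank = 4;

  // Old binding: the reference aliases the stored attribute, so the
  // normalization loop rewrites param.dim itself.
  auto& by_ref = param.dim;
  for (auto& d : by_ref) {
    if (d < 0) d = x_rank + d;
  }
  std::printf("after by-ref pass:  param.dim[0] = %d\n", param.dim[0]);  // 3

  // New binding: a private copy is normalized; the attribute keeps -1.
  param.dim = {-1};
  auto by_copy = param.dim;
  for (auto& d : by_copy) {
    if (d < 0) d = x_rank + d;
  }
  std::printf("after by-copy pass: param.dim[0] = %d\n", param.dim[0]);  // -1
  return 0;
}
```

Copying a short `std::vector<int>` once per call is cheap next to the aliasing risk, so this is the one place where the copy is the right binding.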
```diff
@@ -58,7 +60,7 @@ bool ReduceOp::InferShape() const {
   int dim_index = 0;
   int out_index = 0;
   for (size_t i = 0; i < x_rank; ++i) {
-    if (dims[dim_index] == i) {
+    if (dims[dim_index] == static_cast<DDim::value_type>(i)) {
      dim_index++;
     } else {
       out_dims[out_index++] = x_dims[i];
...
```
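The `static_cast<DDim::value_type>(i)` above fixes a mixed signed/unsigned comparison: `i` is a `size_t` loop index while the element from `dims` is signed, and comparing them directly converts the signed operand to unsigned and draws `-Wsign-compare` (fatal under `-Werror` builds). A small illustration of the wraparound that rule exists to catch; the types here are illustrative, and `<` is used instead of the hunk's `==` only because it makes the misbehavior visible:

```cpp
#include <cstdio>

int main() {
  int d = -1;         // a signed axis value, e.g. before normalization
  unsigned int i = 0;

  // Implicit conversion: d is converted to unsigned, -1 wraps to
  // UINT_MAX, and the "obviously true" comparison turns false.
  bool implicit_lt = (d < i);                     // false (wrapped)

  // Casting the index to the signed element type keeps the comparison
  // in signed arithmetic, as the patched line does.
  bool explicit_lt = (d < static_cast<int>(i));   // true: -1 < 0

  std::printf("implicit: %d, explicit: %d\n", implicit_lt, explicit_lt);
  return 0;
}
```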
```diff
@@ -27,11 +27,11 @@ bool ReshapeOp::CheckShape() const {
 }
 
 bool ReshapeOp::InferShape() const {
-  auto &shape_tensor_vct = param_.shape_tensor_vct;
+  const auto &shape_tensor_vct = param_.shape_tensor_vct;
   auto *shape_tensor = param_.shape_tensor;
-  auto &shape_vct = param_.shape_vct;
+  const auto &shape_vct = param_.shape_vct;
   std::vector<int> final_shape;
 
   if (shape_tensor_vct.size() > 0) {
     final_shape.resize(shape_tensor_vct.size());
     for (int i = 0; i < shape_tensor_vct.size(); i++) {
@@ -47,7 +47,7 @@ bool ReshapeOp::InferShape() const {
     LOG(FATAL) << "input shape error";
   }
 
-  auto &x_dims = param_.x->dims();
+  const auto &x_dims = param_.x->dims();
   auto output_dims = ValidateShape(final_shape, x_dims);
   param_.output->Resize(output_dims);
   auto out_lod = param_.output->mutable_lod();
@@ -99,7 +99,7 @@ bool Reshape2Op::CheckShape() const {
 bool Reshape2Op::InferShape() const {
   ReshapeOp::InferShape();
-  auto &x_dims = param_.x->dims();
+  const auto &x_dims = param_.x->dims();
   DDim xshape_dims;
   xshape_dims.resize(x_dims.size() + 1);
   xshape_dims[0] = 0;
...
```