Commit aef8084f authored by Liu Yiqun

Add a function to resize DDimLite instead of adding a new constructor.

Parent b79bf021
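
For context, here is a minimal, self-contained sketch of the usage change: callers used to construct a dimension object directly with a size, and now default-construct it and call the new `resize()` member (likewise, `Tensor::Resize` call sites now pass an explicit `std::vector<int64_t>`). The trimmed `DDimLite` below is a stand-in written only for this illustration, not the real class; only the `resize()` member and the removed size constructor correspond to what the diff changes.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for lite::DDimLite, trimmed for this example only.
class DDimLite {
 public:
  using value_type = int64_t;

  DDimLite() = default;
  explicit DDimLite(const std::vector<value_type> &x) : data_(x) {}

  void resize(size_t size) { data_.resize(size); }  // added by this commit
  size_t size() const { return data_.size(); }
  value_type &operator[](size_t i) { return data_[i]; }

 private:
  std::vector<value_type> data_;
};

int main() {
  // Old pattern (size constructor, removed by this commit):
  //   DDimLite out_dims(rank);
  // New pattern: default-construct, then resize.
  const size_t rank = 4;
  DDimLite out_dims;
  out_dims.resize(rank);
  for (size_t i = 0; i < out_dims.size(); ++i) {
    out_dims[i] = 1;
  }
  return 0;
}
```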
@@ -47,7 +47,8 @@ value_type DDimLite::count(int start, int end) const {
 DDimLite DDimLite::Slice(int start, int end) const {
   start = std::max(start, 0);
   end = std::min(end, static_cast<int>(data_.size()));
-  DDimLite new_dim(end - start);
+  DDimLite new_dim;
+  new_dim.resize(end - start);
   for (int i = start; i < end; ++i) {
     new_dim[i - start] = data_[i];
   }
......
@@ -73,7 +73,6 @@ class DDimLite {
   DDimLite() = default;
   explicit DDimLite(const std::vector<value_type> &x) { ConstructFrom(x); }
-  explicit DDimLite(size_t size) { data_.resize(size); }
   void ConstructFrom(const std::vector<value_type> &x) {
     data_.resize(x.size());
@@ -93,6 +92,7 @@
   }
   size_t size() const { return data_.size(); }
+  void resize(size_t size) { data_.resize(size); }
   bool empty() const { return data_.size() == 0U; }
   const DDimVector &data() const { return data_; }
......
@@ -139,8 +139,9 @@ class FcCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     auto w_dims0 = padding_weights ? w_dims[0] - 4 : w_dims[0];
     auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1];
+    DDim out_dims;
+    out_dims.resize(static_cast<size_t>(in_num_col_dims + 1));
     auto& in_dims = input->dims();
-    DDim out_dims(static_cast<size_t>(in_num_col_dims + 1));
     for (int i = 0; i < in_num_col_dims; ++i) {
       out_dims[i] = in_dims[i];
     }
......
@@ -64,8 +64,8 @@ void SearchGrnnCompute<T>::PrepareLayout(const Tensor* input_blob) {
   auto& offset = _input->lod()[0];
   Tensor _width;
-  _width.Resize({batch});
-  _idx_sorted_by_width->Resize({batch});
+  _width.Resize(std::vector<int64_t>({batch}));
+  _idx_sorted_by_width->Resize(std::vector<int64_t>({batch}));
   int* width_data = _width.template mutable_data<int>();
   int* idx_sorted_by_width_data =
       _idx_sorted_by_width->template mutable_data<int>();
......
@@ -62,10 +62,10 @@ bool BatchNormOp::InferShape() const {
       break;
   }
   if (!param_.is_test) {
-    param_.mean_out->Resize({channel_size});
-    param_.variance_out->Resize({channel_size});
-    param_.saved_mean->Resize({channel_size});
-    param_.saved_variance->Resize({channel_size});
+    param_.mean_out->Resize(std::vector<int64_t>({channel_size}));
+    param_.variance_out->Resize(std::vector<int64_t>({channel_size}));
+    param_.saved_mean->Resize(std::vector<int64_t>({channel_size}));
+    param_.saved_variance->Resize(std::vector<int64_t>({channel_size}));
   }
   param_.y->Resize(x_dims);
   return true;
......
@@ -54,7 +54,8 @@ bool FcOpLite::InferShape() const {
   int in_num_col_dims = param_.in_num_col_dims;
   // Set output dims
-  DDim output_dims(in_num_col_dims + 1);
+  DDim output_dims;
+  output_dims.resize(in_num_col_dims + 1);
   for (int i = 0; i < in_num_col_dims; ++i) {
     output_dims[i] = input_dims[i];
   }
......
@@ -47,7 +47,8 @@ bool ReduceOp::InferShape() const {
     param_.output->Resize(std::vector<int64_t>{1});
   } else {
     size_t out_rank = keep_dim ? x_rank : x_rank - dims.size();
-    DDim out_dims(out_rank);
+    DDim out_dims;
+    out_dims.resize(out_rank);
     if (keep_dim) {
       for (size_t i = 0; i < dims.size(); ++i) {
         out_dims[dims[i]] = 1;
@@ -100,7 +100,8 @@ bool Reshape2Op::CheckShape() const {
 bool Reshape2Op::InferShape() const {
   ReshapeOp::InferShape();
   auto &x_dims = param_.x->dims();
-  DDim xshape_dims(x_dims.size() + 1);
+  DDim xshape_dims;
+  xshape_dims.resize(x_dims.size() + 1);
   xshape_dims[0] = 0;
   for (size_t i = 0; i < x_dims.size(); i++) {
     xshape_dims[i + 1] = x_dims[i];
@@ -126,7 +127,8 @@ DDim ValidateShape(const std::vector<int> &shape, const DDim &input_dims) {
   const int unk_dim_val = -1;
   const int copy_dim_val = 0;
-  DDim output_dims(shape.size());
+  DDim output_dims;
+  output_dims.resize(shape.size());
   DDim::value_type capacity = 1;
   int unk_dim_idx = -1;
   for (size_t i = 0; i < shape.size(); ++i) {
......