提交 1abd9921 编写于 作者: L Liu Yiqun

Use static array instead of std::vector in DDimLite.

test=develop
上级 28481458
...@@ -25,35 +25,31 @@ using value_type = int64_t; ...@@ -25,35 +25,31 @@ using value_type = int64_t;
// Total number of elements: the product of every dim.
// An empty DDimLite yields 1 (the empty product).
value_type DDimLite::production() const {
  value_type prod = 1;
  const size_t rank = data_.size();
  for (size_t i = 0; i != rank; ++i) {
    prod *= data_[i];
  }
  return prod;
}
// Product of the dims in the half-open range [start, end).
// The range is clamped to [0, size()]; an inverted range yields 0.
value_type DDimLite::count(int start, int end) const {
  const int lo = std::max(start, 0);
  const int hi = std::min(end, static_cast<int>(data_.size()));
  if (hi < lo) {
    return 0;
  }
  value_type total = 1;
  for (int i = lo; i < hi; ++i) {
    total *= data_[i];
  }
  return total;
}
// Returns a new DDimLite holding the dims in [start, end).
// Both bounds are clamped to [0, size()].
DDimLite DDimLite::Slice(int start, int end) const {
  start = std::max(start, 0);
  end = std::min(end, static_cast<int>(data_.size()));
  // Guard against an inverted range (e.g. start > size() after clamping):
  // without this, (end - start) is negative and wraps to a huge size_t in
  // the memcpy below, overflowing the stack buffer. count() already guards
  // the equivalent case.
  if (end < start) {
    end = start;
  }
  value_type arr[kMaxDimLength];
  memcpy(arr, data_.data() + start, (end - start) * sizeof(value_type));
  return DDimLite(arr, end - start);
}
std::string DDimLite::repr() const { std::string DDimLite::repr() const {
......
...@@ -38,28 +38,69 @@ class TensorLite; ...@@ -38,28 +38,69 @@ class TensorLite;
using DDim = lite::DDimLite; using DDim = lite::DDimLite;
using Tensor = lite::TensorLite; using Tensor = lite::TensorLite;
// Fixed-capacity array exposing a std::vector-like subset of operations
// (size/resize/data/operator[]); used as the backing store for DDimLite.
template <typename ValueType, int DimLength>
class DimVector {
 public:
  // Zero-fill the whole backing array; the logical size starts at 0
  // via the member initializer below.
  DimVector() { memset(arr_, 0, sizeof(arr_)); }

  // Number of elements currently in use.
  size_t size() const { return size_; }

  // Changes the logical size only; storage contents are left untouched.
  // Aborts (CHECK) when new_size exceeds the fixed capacity.
  void resize(size_t new_size) {
    CHECK_LE(new_size, DimLength);
    size_ = new_size;
  }

  ValueType *data() { return arr_; }
  const ValueType *data() const { return arr_; }

  // Unchecked element access; offset must be within [0, size()).
  ValueType operator[](int offset) const { return arr_[offset]; }
  ValueType &operator[](int offset) { return arr_[offset]; }

 private:
  ValueType arr_[DimLength];
  size_t size_{0};
};
// Maximum number of dimensions a DDimLite can hold (fixed-capacity storage).
constexpr int kMaxDimLength = 10;
class DDimLite { class DDimLite {
public: public:
using value_type = int64_t; using value_type = int64_t;
using DDimVector = DimVector<value_type, kMaxDimLength>;
DDimLite() = default; DDimLite() = default;
explicit DDimLite(const std::vector<value_type> &x) { ConstructFrom(x); } explicit DDimLite(const std::vector<value_type> &x) { ConstructFrom(x); }
// DDimLite(std::initializer_list<value_type> init_list) : explicit DDimLite(const value_type *arr, size_t size) {
// DDimLite(std::vector<value_type>(init_list)) {} data_.resize(size);
memcpy(data_.data(), arr, data_.size() * sizeof(value_type));
}
void ConstructFrom(const std::vector<value_type> &x) { data_ = x; } void ConstructFrom(const std::vector<value_type> &x) {
data_.resize(x.size());
memcpy(data_.data(), x.data(), x.size() * sizeof(value_type));
}
  // Unchecked element access; `offset` must be within [0, size()).
  value_type operator[](int offset) const { return data_[offset]; }
  value_type &operator[](int offset) { return data_[offset]; }
std::vector<int64_t> Vectorize() const { return data_; }
std::vector<value_type> Vectorize() const {
std::vector<value_type> vec;
if (data_.size() > 0U) {
vec.resize(data_.size());
memcpy(vec.data(), data_.data(), data_.size() * sizeof(value_type));
}
return vec;
}
  // Number of dimensions (rank).
  size_t size() const { return data_.size(); }
  // True when no dimensions are stored.
  bool empty() const { return data_.size() == 0U; }
  // Read-only view of the underlying fixed-capacity dim storage.
  const DDimVector &data() const { return data_; }
  // Product of all dims (1 for an empty DDimLite).
  value_type production() const;
  // Product of the dims in [start, end), clamped to the valid range.
  value_type count(int start, int end) const;
  // New DDimLite holding the dims in [start, end), clamped to the valid range.
  DDimLite Slice(int start, int end) const;
...@@ -76,6 +117,12 @@ class DDimLite { ...@@ -76,6 +117,12 @@ class DDimLite {
return os; return os;
} }
DDimLite &operator=(const DDimLite &a) {
this->data_.resize(a.size());
memcpy(this->data_.data(), a.data_.data(), a.size() * sizeof(value_type));
return *this;
}
friend bool operator==(const DDimLite &a, const DDimLite &b) { friend bool operator==(const DDimLite &a, const DDimLite &b) {
if (a.size() != b.size()) return false; if (a.size() != b.size()) return false;
for (size_t i = 0; i < a.size(); i++) { for (size_t i = 0; i < a.size(); i++) {
...@@ -85,11 +132,15 @@ class DDimLite { ...@@ -85,11 +132,15 @@ class DDimLite {
} }
friend bool operator!=(const DDimLite &a, const DDimLite &b) { friend bool operator!=(const DDimLite &a, const DDimLite &b) {
return !(a == b); if (a.size() != b.size()) return true;
for (size_t i = 0; i < a.size(); i++) {
if (a[i] != b[i]) return true;
}
return false;
} }
private: private:
std::vector<value_type> data_; DDimVector data_;
}; };
using LoD = std::vector<std::vector<uint64_t>>; using LoD = std::vector<std::vector<uint64_t>>;
......
...@@ -359,8 +359,8 @@ void MulticlassNmsCompute::Run() { ...@@ -359,8 +359,8 @@ void MulticlassNmsCompute::Run() {
uint64_t num_kept = batch_starts.back(); uint64_t num_kept = batch_starts.back();
if (num_kept == 0) { if (num_kept == 0) {
if (return_index) { if (return_index) {
outs->Resize({0, out_dim}); outs->Resize(std::vector<int64_t>{0, out_dim});
index->Resize({0, 1}); index->Resize(std::vector<int64_t>{0, 1});
} else { } else {
outs->Resize({1, 1}); outs->Resize({1, 1});
float* od = outs->mutable_data<float>(); float* od = outs->mutable_data<float>();
...@@ -375,8 +375,8 @@ void MulticlassNmsCompute::Run() { ...@@ -375,8 +375,8 @@ void MulticlassNmsCompute::Run() {
if (score_size == 3) { if (score_size == 3) {
scores_slice = scores->Slice<float>(i, i + 1); scores_slice = scores->Slice<float>(i, i + 1);
boxes_slice = boxes->Slice<float>(i, i + 1); boxes_slice = boxes->Slice<float>(i, i + 1);
scores_slice.Resize({score_dims[1], score_dims[2]}); scores_slice.Resize(std::vector<int64_t>{score_dims[1], score_dims[2]});
boxes_slice.Resize({score_dims[2], box_dim}); boxes_slice.Resize(std::vector<int64_t>{score_dims[2], box_dim});
if (return_index) { if (return_index) {
offset = i * score_dims[2]; offset = i * score_dims[2];
} }
......
...@@ -29,7 +29,6 @@ namespace x86 { ...@@ -29,7 +29,6 @@ namespace x86 {
// Copies `in` into `out`, then restores `out`'s previously-set shape.
// NOTE(review): saving out->dims() before the copy suggests CopyDataFrom
// also overwrites the destination's dims — confirm against
// TensorLite::CopyDataFrom before reordering these statements.
template <typename T>
void Compute(const lite::Tensor* in, lite::Tensor* out) {
  auto out_dims = out->dims();
  out->CopyDataFrom(*in);
  out->Resize(out_dims);
}
......
...@@ -32,7 +32,6 @@ bool GenerateProposalsOpLite::CheckShape() const { ...@@ -32,7 +32,6 @@ bool GenerateProposalsOpLite::CheckShape() const {
auto scores_dims = param_.Scores->dims(); auto scores_dims = param_.Scores->dims();
auto bbox_dims = param_.BboxDeltas->dims(); auto bbox_dims = param_.BboxDeltas->dims();
auto im_info_dims = param_.ImInfo->dims();
auto anchors_dims = param_.Anchors->dims(); auto anchors_dims = param_.Anchors->dims();
auto vars_dims = param_.Variances->dims(); auto vars_dims = param_.Variances->dims();
......
...@@ -54,7 +54,6 @@ bool GRUUnitOpLite::CheckShape() const { ...@@ -54,7 +54,6 @@ bool GRUUnitOpLite::CheckShape() const {
bool GRUUnitOpLite::InferShape() const { bool GRUUnitOpLite::InferShape() const {
auto input_dims = param_.input->dims(); auto input_dims = param_.input->dims();
auto hidden_prev_dims = param_.hidden_prev->dims(); auto hidden_prev_dims = param_.hidden_prev->dims();
auto weight_dims = param_.weight->dims();
int batch_size = input_dims[0]; int batch_size = input_dims[0];
int frame_size = hidden_prev_dims[1]; int frame_size = hidden_prev_dims[1];
......
...@@ -42,8 +42,6 @@ bool LookupTableOpLite::InferShape() const { ...@@ -42,8 +42,6 @@ bool LookupTableOpLite::InferShape() const {
int ids_rank = ids_dims.size(); int ids_rank = ids_dims.size();
auto output_dims = ids_dims.Slice(0, ids_rank - 1);
std::vector<int64_t> out_dims; std::vector<int64_t> out_dims;
for (int i = 0; i < ids_rank - 1; ++i) { for (int i = 0; i < ids_rank - 1; ++i) {
out_dims.push_back(ids_dims[i]); out_dims.push_back(ids_dims[i]);
......
...@@ -47,7 +47,6 @@ bool MatchMatrixTensorOpLite::InferShape() const { ...@@ -47,7 +47,6 @@ bool MatchMatrixTensorOpLite::InferShape() const {
const Tensor* y = param_.y; const Tensor* y = param_.y;
DDim x_dims = param_.x->dims(); DDim x_dims = param_.x->dims();
DDim y_dims = param_.y->dims(); DDim y_dims = param_.y->dims();
DDim w_dims = param_.w->dims();
int dim_t = param_.dim_t; int dim_t = param_.dim_t;
const auto& x_lod = x->lod(); const auto& x_lod = x->lod();
......
...@@ -47,7 +47,6 @@ void Relu(float* out, int num, int channel) { ...@@ -47,7 +47,6 @@ void Relu(float* out, int num, int channel) {
DDim ComputeOutDim(const DDim& dim_in, const DDim& wdim, int in_num_col_dim) { DDim ComputeOutDim(const DDim& dim_in, const DDim& wdim, int in_num_col_dim) {
std::vector<int64_t> out_dim; std::vector<int64_t> out_dim;
out_dim.resize(in_num_col_dim + 1); out_dim.resize(in_num_col_dim + 1);
auto in_mat_dims = dim_in.Flatten2D(in_num_col_dim);
for (int i = 0; i < in_num_col_dim; ++i) { for (int i = 0; i < in_num_col_dim; ++i) {
out_dim[i] = dim_in[i]; out_dim[i] = dim_in[i];
} }
......
...@@ -153,7 +153,8 @@ class SliceComputeTester : public arena::TestCase { ...@@ -153,7 +153,8 @@ class SliceComputeTester : public arena::TestCase {
} }
out->Resize(out_dims); out->Resize(out_dims);
auto* out_data = out->mutable_data<float>(); auto* out_data = out->mutable_data<float>();
slice_ref(input_data, in_dims.data(), axes_, starts_, ends_, out_data); std::vector<int64_t> in_dims_vec = in_dims.Vectorize();
slice_ref(input_data, in_dims_vec, axes_, starts_, ends_, out_data);
} }
void PrepareOpDesc(cpp::OpDesc* op_desc) { void PrepareOpDesc(cpp::OpDesc* op_desc) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册