From 118c1441567de9fb78b1989e6f8135f5ca19427b Mon Sep 17 00:00:00 2001
From: Liu Yiqun
Date: Thu, 6 Feb 2020 12:03:32 +0000
Subject: [PATCH] Save the workspace.

---
 lite/core/program.cc                        |  1 +
 lite/core/tensor.h                          | 57 +++++++++++++++++----
 lite/kernels/x86/lookup_table_compute.h     |  2 +
 lite/kernels/x86/sequence_reshape_compute.h |  4 +-
 lite/operators/lookup_table_op.cc           |  3 ++
 5 files changed, 54 insertions(+), 13 deletions(-)

diff --git a/lite/core/program.cc b/lite/core/program.cc
index 41d178f015..7495368c15 100644
--- a/lite/core/program.cc
+++ b/lite/core/program.cc
@@ -137,6 +137,7 @@ void RuntimeProgram::UpdateVarsOfProgram(cpp::ProgramDesc* desc) {
 
 void RuntimeProgram::Run() {
   for (auto& inst : instructions_) {
+    // LOG(INFO) << "Run op: " << inst.op()->op_info()->Type();
     if (inst.is_feed_fetch_op()) continue;
     inst.Run();
 #ifdef LITE_WITH_PROFILE
diff --git a/lite/core/tensor.h b/lite/core/tensor.h
index af32f379e7..67428dbcda 100644
--- a/lite/core/tensor.h
+++ b/lite/core/tensor.h
@@ -42,26 +42,47 @@ template class DimVector {
  public:
   DimVector() {
-    memset(arr_, 0, DimLength * sizeof(ValueType));
+    // data_ = new ValueType[DimLength];
+    // data_ = static_cast<ValueType *>(malloc(DimLength *
+    // sizeof(ValueType)));
+    data_.resize(DimLength);
+    // memset(data_, 0, DimLength * sizeof(ValueType));
     size_ = 0;
   }
+  ~DimVector() {
+    // if (data_) {
+    //   delete[] data_;
+    //   free(data_);
+    // }
+  }
 
   size_t size() const { return size_; }
 
   void resize(size_t new_size) {
     CHECK_LE(new_size, DimLength) << "Expected the number of dimentations <= "
                                   << DimLength << ", received " << new_size
                                   << ".";
+    // if (new_size != size_) {
+    //   delete[] data_;
+    //   data_ = nullptr;
+    // }
     size_ = new_size;
   }
 
-  ValueType *data() { return arr_; }
-  const ValueType *data() const { return arr_; }
+  ValueType *mutable_data() {
+    // if (!data_ && size_ > 0U) {
+    //   data_ = new ValueType[size_];
+    // }
+    return data_.data();
+  }
+  const ValueType *data() const { return data_.data(); }
 
-  ValueType operator[](int offset) const { return arr_[offset]; }
-  ValueType &operator[](int offset) { return arr_[offset]; }
+  ValueType operator[](int offset) const { return data_[offset]; }
+  ValueType &operator[](int offset) { return data_[offset]; }
 
  private:
-  ValueType arr_[DimLength];
+  // ValueType data_[DimLength];
+  // ValueType* data_{nullptr};
+  std::vector<ValueType> data_;
   size_t size_{0};
 };
 
@@ -78,7 +99,7 @@ class DDimLite {
 
   void ConstructFrom(const std::vector<value_type> &x) {
     data_.resize(x.size());
-    memcpy(data_.data(), x.data(), x.size() * sizeof(value_type));
+    memcpy(data_.mutable_data(), x.data(), x.size() * sizeof(value_type));
   }
 
   value_type operator[](int offset) const { return data_[offset]; }
@@ -127,7 +148,9 @@ class DDimLite {
 
   DDimLite &operator=(const DDimLite &a) {
     this->data_.resize(a.size());
-    memcpy(this->data_.data(), a.data_.data(), a.size() * sizeof(value_type));
+    memcpy(this->data_.mutable_data(),
+           a.data_.data(),
+           a.size() * sizeof(value_type));
     return *this;
   }
 
@@ -176,10 +199,19 @@ class TensorLite {
                                        offset_);
   }
 
-  void Resize(const DDimLite &ddim) { dims_ = ddim; }
-  void Resize(const std::vector<int64_t> &x) { dims_ = DDimLite(x); }
+  void Resize(const DDimLite &ddim) {
+    dims_ = ddim;
+    // LOG(INFO) << "Set dims: " << dims_ << " for tensor " << this;
+  }
+  void Resize(const std::vector<int64_t> &x) {
+    dims_ = DDimLite(x);
+    // LOG(INFO) << "Set dims: " << dims_ << " for tensor " << this;
+  }
 
-  const DDimLite &dims() const { return dims_; }
+  const DDimLite &dims() const {
+    // LOG(INFO) << "Get dims: " << dims_ << " for tensor " << this;
+    return dims_;
+  }
   int64_t numel() const { return dims_.production(); }
 
   const LoD &lod() const { return lod_; }
@@ -216,6 +248,9 @@ class TensorLite {
     }
     memory_size_ = dims_.production() * sizeof(T);
     buffer_->ResetLazy(target_, memory_size_);
+    // char *ptr = static_cast<char *>(buffer_->data()) + offset_;
+    // LOG(INFO) << "mutable_data for tensor " << this << ": " << ptr << ",
+    // memory_size: " << memory_size_;
     return reinterpret_cast<R *>(static_cast<char *>(buffer_->data()) +
                                  offset_);
   }
diff --git a/lite/kernels/x86/lookup_table_compute.h b/lite/kernels/x86/lookup_table_compute.h
index eeafa2e33e..e67f7696d1 100644
--- a/lite/kernels/x86/lookup_table_compute.h
+++ b/lite/kernels/x86/lookup_table_compute.h
@@ -34,9 +34,11 @@ class LookupTableCompute : public KernelLite {
     auto *output_t = param.Out;
     int64_t padding_idx = param.padding_idx;
     auto *ids = ids_t->data<int64_t>();
+    // LOG(INFO) << "ids->dims: " << ids_t->dims();
     int64_t ids_numel = ids_t->dims().production();
 
     auto *table_t = param.W;
+    // LOG(INFO) << "W->dims: " << table_t->dims();
     int64_t row_number = table_t->dims()[0];
     int64_t row_width = table_t->dims()[1];
 
diff --git a/lite/kernels/x86/sequence_reshape_compute.h b/lite/kernels/x86/sequence_reshape_compute.h
index 68a573c2f6..343804942e 100644
--- a/lite/kernels/x86/sequence_reshape_compute.h
+++ b/lite/kernels/x86/sequence_reshape_compute.h
@@ -36,9 +36,9 @@ class SequenceReshapeCompute
     auto* out = param.output;
     int out_width = param.new_dim;
 
-    auto in_dims = in->dims();
+    const auto& in_dims = in->dims();
+    // LOG(INFO) << "in_dims: " << in_dims;
     int64_t in_width = in_dims[1];
-    // LOG(INFO)<<"sequence_reshape in tensor:"<<*in;
     auto& in_lod = in->lod();
 
     CHECK_EQ(in_lod.size(), 1UL);
diff --git a/lite/operators/lookup_table_op.cc b/lite/operators/lookup_table_op.cc
index 6ee37ebb51..8c6b7f502d 100644
--- a/lite/operators/lookup_table_op.cc
+++ b/lite/operators/lookup_table_op.cc
@@ -38,12 +38,15 @@ bool LookupTableOpLite::CheckShape() const {
 
 bool LookupTableOpLite::InferShape() const {
   const auto &table_dims = param_.W->dims();
+  // LOG(INFO) << "table_dims: " << table_dims;
   const auto &ids_dims = param_.Ids->dims();
+  // LOG(INFO) << "ids_dims: " << ids_dims;
 
   auto out_dims = ids_dims;
   int ids_rank = ids_dims.size();
   out_dims[ids_rank - 1] = table_dims[1];
+  // LOG(INFO) << "out_dims: " << out_dims;
 
   param_.Out->Resize(out_dims);
   param_.Out->set_lod(param_.Ids->lod());
   return true;
-- 
GitLab