diff --git a/mindspore/ccsrc/ir/tensor.cc b/mindspore/ccsrc/ir/tensor.cc
index 673a8da8423ec46108613535b01bbaa3633e6d95..dccdbb65b8cc87c328d97b5001f74c0bdcd017fe 100644
--- a/mindspore/ccsrc/ir/tensor.cc
+++ b/mindspore/ccsrc/ir/tensor.cc
@@ -35,7 +35,7 @@ using Bool = unsigned char;
 static std::string MakeId() {
   // Use atomic to make id generator thread safe.
   static std::atomic<uint64_t> last_id{1};
-  return std::to_string(last_id.fetch_add(1, std::memory_order_relaxed));
+  return "T" + std::to_string(last_id.fetch_add(1, std::memory_order_relaxed));
 }
 
 static TypeId TypeIdOf(const TypePtr &data_type, TypeId defaultTypeId) {
@@ -127,41 +127,47 @@ std::vector<T> CopyData(const std::vector<int> &shape, void *data, size_t data_len) {
 template <typename T>
 class TensorDataImpl : public TensorData {
  public:
-  explicit TensorDataImpl(const std::vector<int> &shape) : shape_(shape), data_(SizeOf(shape)) {}
+  explicit TensorDataImpl(const std::vector<int> &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
 
   TensorDataImpl(const std::vector<int> &shape, void *data, size_t data_len)
-      : shape_(shape), data_(CopyData<T>(shape, data, data_len)) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)) {}
 
   TensorDataImpl(const std::vector<int> &shape, void *data, TypeId data_type)
-      : shape_(shape), data_(CopyData<T>(shape, data, data_type)) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_type)) {}
 
   template <typename InputIt>
-  TensorDataImpl(const std::vector<int> &shape, InputIt first, InputIt last) : shape_(shape), data_(first, last) {}
+  TensorDataImpl(const std::vector<int> &shape, InputIt first, InputIt last)
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(first, last) {}
 
   template <typename Scalar>
-  TensorDataImpl(const std::vector<int> &shape, Scalar scalar) : shape_(shape), data_({static_cast<T>(scalar)}) {}
+  TensorDataImpl(const std::vector<int> &shape, Scalar scalar)
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_({static_cast<T>(scalar)}) {}
 
-  ssize_t size() const override { return data_.size(); }
+  ssize_t size() const override { return static_cast<ssize_t>(data_size_); }
 
   ssize_t itemsize() const override { return static_cast<ssize_t>(sizeof(T)); }
 
   ssize_t nbytes() const override { return size() * itemsize(); }
 
-  ssize_t ndim() const override { return static_cast<ssize_t>(shape_.size()); }
+  ssize_t ndim() const override { return static_cast<ssize_t>(ndim_); }
 
   void *data() override {
     static std::vector<T> empty_data(1);
-    if (data_.empty()) {
-      // Prevent null pointer for empty data.
+    if (data_size_ == 0) {
+      // Prevent null pointer for empty shape.
       return empty_data.data();
     }
+    if (data_.empty()) {
+      // Lazy allocation.
+      data_.resize(data_size_);
+    }
     return data_.data();
   }
 
   bool equals(const TensorData &other) const override {
     auto ptr = dynamic_cast<const TensorDataImpl<T> *>(&other);
     if (ptr) {
-      return (ptr == this) || ((shape_ == ptr->shape_) && (data_ == ptr->data_));
+      return (ptr == this) || ((ndim_ == ptr->ndim_) && (data_size_ == ptr->data_size_) && (data_ == ptr->data_));
     }
     return false;
   }
@@ -177,7 +183,8 @@ class TensorDataImpl : public TensorData {
   }
 
  private:
-  std::vector<int> shape_;
+  size_t ndim_{0};
+  size_t data_size_{0};
   std::vector<T> data_;
 };
 
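
Note: the TensorDataImpl change above drops the stored shape_ vector in favor of ndim_/data_size_ and defers allocating the backing buffer until data() is first called. The standalone sketch below illustrates that lazy-allocation pattern only; the LazyBuffer class and its product-of-dimensions size computation are hypothetical illustrations, not part of MindSpore.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-in for TensorDataImpl<T>: it keeps only the rank and the
// element count up front, and materializes the buffer on first data() access.
template <typename T>
class LazyBuffer {
 public:
  explicit LazyBuffer(const std::vector<int> &shape)
      : ndim_(shape.size()),
        data_size_(std::accumulate(shape.begin(), shape.end(), static_cast<size_t>(1),
                                   [](size_t acc, int dim) { return acc * static_cast<size_t>(dim); })) {}

  size_t size() const { return data_size_; }
  size_t ndim() const { return ndim_; }

  T *data() {
    static std::vector<T> empty_data(1);
    if (data_size_ == 0) {
      // Prevent returning a null pointer when the shape contains a zero dimension.
      return empty_data.data();
    }
    if (data_.empty()) {
      // Lazy allocation: the buffer is created only when it is actually needed.
      data_.resize(data_size_);
    }
    return data_.data();
  }

 private:
  size_t ndim_{0};
  size_t data_size_{0};
  std::vector<T> data_;  // stays empty until data() is first called
};

int main() {
  LazyBuffer<float> buf({2, 3});
  std::cout << "ndim=" << buf.ndim() << " size=" << buf.size() << '\n';  // no buffer allocated yet
  buf.data()[0] = 1.5f;  // first data() call resizes the vector to 6 floats
  std::cout << "first element: " << buf.data()[0] << '\n';
  return 0;
}

Keeping the element count separate from the buffer lets metadata queries such as size() and ndim() stay cheap for tensors whose host-side data is never touched.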