Commit 6124a4ed authored by mindspore-ci-bot, committed by Gitee

!2859 Print tensor as numpy.

Merge pull request !2859 from ZhangQinghua/master
@@ -23,12 +23,18 @@
 #include <sstream>
 #include <string>
 #include <utility>
+#include <iomanip>
+#include <algorithm>
+#include <type_traits>
+#include <typeinfo>
 #include "device/device_address.h"
 #include "pipeline/static_analysis/abstract_value.h"
 namespace mindspore {
 namespace tensor {
+constexpr auto kEllipsis = "...";
+constexpr auto kThreshold = 6;
 static std::string MakeId() {
   // Use atomic to make id generator thread safe.
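The two new constants drive the numpy-style summarization: any dimension longer than kThreshold (6) prints only its first and last kThreshold / 2 (3) entries around kEllipsis. A minimal standalone sketch of that behavior for a 1-D sequence (not part of this commit; the constant names are reused purely for illustration):

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

constexpr auto kEllipsis = "...";
constexpr auto kThreshold = 6;

// Print at most kThreshold elements; longer inputs are elided in the middle.
std::string Summarize(const std::vector<int> &v) {
  std::ostringstream ss;
  ss << '[';
  const auto n = static_cast<int>(v.size());
  if (n > kThreshold) {
    for (int i = 0; i < kThreshold / 2; ++i) ss << v[i] << ' ';
    ss << kEllipsis;
    for (int i = n - kThreshold / 2; i < n; ++i) ss << ' ' << v[i];
  } else {
    for (int i = 0; i < n; ++i) ss << (i ? " " : "") << v[i];
  }
  ss << ']';
  return ss.str();
}

int main() {
  std::cout << Summarize({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) << std::endl;  // [0 1 2 ... 7 8 9]
}
```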
@@ -114,21 +120,22 @@ std::vector<T> CopyData(const std::vector<int> &shape, void *data, size_t data_l
 template <typename T>
 class TensorDataImpl : public TensorData {
  public:
-  explicit TensorDataImpl(const std::vector<int> &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
+  explicit TensorDataImpl(const std::vector<int> &shape)
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), shape_(shape) {}
   TensorDataImpl(const std::vector<int> &shape, void *data, size_t data_len)
-      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)), shape_(shape) {}
   TensorDataImpl(const std::vector<int> &shape, void *data, TypeId data_type)
-      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_type)) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_type)), shape_(shape) {}
   template <typename InputIt>
   TensorDataImpl(const std::vector<int> &shape, InputIt first, InputIt last)
-      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(first, last) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(first, last), shape_(shape) {}
   template <typename Scalar>
   TensorDataImpl(const std::vector<int> &shape, Scalar scalar)
-      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_({static_cast<T>(scalar)}) {}
+      : ndim_(shape.size()), data_size_(SizeOf(shape)), data_({static_cast<T>(scalar)}), shape_(shape) {}
   ssize_t size() const override { return static_cast<ssize_t>(data_size_); }
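Every constructor now also records shape_, because the new ToString() needs the nesting structure to slice the flat data_ buffer; previously only the element count survived. A small sketch of the invariant being preserved (SizeOf here is a stand-in for the file's existing helper):

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Stand-in for the file's SizeOf(): element count as the product of dimensions.
size_t SizeOf(const std::vector<int> &shape) {
  return std::accumulate(shape.begin(), shape.end(), size_t{1}, std::multiplies<size_t>());
}

int main() {
  std::vector<int> shape{2, 3, 4};
  assert(SizeOf(shape) == 24);  // shape_ and data_size_ describe the same buffer
}
```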
@@ -144,13 +151,12 @@ class TensorDataImpl : public TensorData {
       // Prevent null pointer for empty shape.
       return empty_data.data();
     }
-    if (data_.empty()) {
-      // Lazy allocation.
-      data_.resize(data_size_);
-    }
+    CheckDataSafe();
     return data_.data();
   }

+  std::vector<int> shape() const { return shape_; }
+
   bool equals(const TensorData &other) const override {
     auto ptr = dynamic_cast<const TensorDataImpl<T> *>(&other);
     if (ptr) {
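data() now routes through CheckDataSafe() (defined in the next hunk) instead of resizing inline, so every consumer of the raw pointer shares one lazy-allocation path. A reduced sketch of that contract, using a hypothetical LazyBuffer type rather than the real TensorDataImpl:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical reduction of TensorDataImpl's lazy-allocation contract.
template <typename T>
struct LazyBuffer {
  size_t data_size_{0};
  std::vector<T> data_;  // stays empty until first touched

  // Materialize the buffer on first use; value-initializes the elements.
  void CheckDataSafe() {
    if (data_.empty()) {
      data_.resize(data_size_);
    }
  }

  T *data() {
    CheckDataSafe();
    return data_.data();
  }
};

int main() {
  LazyBuffer<float> buf;
  buf.data_size_ = 12;
  float *p = buf.data();  // triggers the resize; p points at 12 zeroed floats
  return p == nullptr;    // never null for a non-empty size
}
```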
@@ -159,20 +165,121 @@ class TensorDataImpl : public TensorData {
     return false;
   }

+  // Prepare for lazy allocation.
+  void CheckDataSafe() {
+    // Lazy allocation.
+    if (data_.empty()) {
+      data_.resize(data_size_);
+    }
+  }
+
+  // ToString() for lazy allocation.
+  std::string ToStringSafe() {
+    CheckDataSafe();
+    return ToString();
+  }
+
   std::string ToString() const override {
+    constexpr auto valid =
+      std::is_same<T, Bool>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
+      std::is_same<T, int16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value ||
+      std::is_same<T, uint16_t>::value || std::is_same<T, uint32_t>::value || std::is_same<T, uint64_t>::value ||
+      std::is_same<T, float16>::value || std::is_same<T, float>::value || std::is_same<T, double>::value;
+    if (!valid) {
+      MS_LOG(EXCEPTION) << "Type is invalid, T: " << typeid(T).name();
+    }
+    if (data_size_ == 0) {
+      return "";
+    }
+    if (data_.empty()) {
+      MS_LOG(ERROR) << "data_ is empty, data_size_: " << data_size_;
+      return "";
+    }
     std::ostringstream ss;
+    ssize_t cursor = 0;
+    SummaryStringRecursive(ss, &cursor, 0);
+    return ss.str();
+  }
+
+ private:
+  void OutputDataString(std::ostringstream &ss, ssize_t cursor, ssize_t start, ssize_t end) const {
+    constexpr auto isFloat =
+      std::is_same<T, float16>::value || std::is_same<T, float>::value || std::is_same<T, double>::value;
+    constexpr auto isSigned = std::is_same<T, int8_t>::value || std::is_same<T, int16_t>::value ||
+                              std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value;
+    for (ssize_t i = start; i < end && (cursor + i) < static_cast<ssize_t>(data_size_); i++) {
+      if (isFloat) {
+        ss << std::setw(15) << std::setprecision(8) << std::setiosflags(std::ios::scientific | std::ios::right)
+           << data_[cursor + i];
+      } else {
+        if (isSigned && static_cast<int64_t>(data_[cursor + i]) >= 0) {
+          ss << ' ';
+        }
+        ss << data_[cursor + i];
+      }
+      if (i != end - 1) {
+        ss << ' ';
+      }
+    }
+  }
+
+  void SummaryStringRecursive(std::ostringstream &ss, ssize_t *cursor, ssize_t depth) const {
+    if (depth >= static_cast<ssize_t>(ndim_)) {
+      return;
+    }
     ss << '[';
-    for (auto value : data_) {
-      ss << value << ',';
+    if (depth == static_cast<ssize_t>(ndim_) - 1) {  // Bottom dimension
+      ssize_t num = shape_[depth];
+      if (num > kThreshold) {
+        OutputDataString(ss, *cursor, 0, kThreshold / 2);
+        ss << ' ' << kEllipsis << ' ';
+        OutputDataString(ss, *cursor, num - kThreshold / 2, num);
+      } else {
+        OutputDataString(ss, *cursor, 0, num);
+      }
+      *cursor += num;
+    } else {  // Middle dimension
+      ssize_t num = shape_[depth];
+      // Handle the first half.
+      for (ssize_t i = 0; i < std::min(static_cast<ssize_t>(kThreshold / 2), num); i++) {
+        if (i > 0) {
+          ss << '\n';
+          ss << std::setw(depth + 1) << ' ';  // Add the indent.
+        }
+        SummaryStringRecursive(ss, cursor, depth + 1);
+      }
+      // Handle the ignored part.
+      if (num > kThreshold) {
+        ss << '\n';
+        ss << std::setw(depth + 1) << ' ';  // Add the indent.
+        ss << kEllipsis << '\n';
+        // Elements ignored at this layer.
+        ssize_t ignored = shape_[depth + 1];
+        for (ssize_t i = depth + 2; i < static_cast<ssize_t>(ndim_); i++) {
+          ignored *= shape_[i];
+        }
+        // Multiply by the number of ignored layers.
+        ignored *= num - kThreshold;
+        *cursor += ignored;
+      }
+      // Handle the second half.
+      if (num > kThreshold / 2) {
+        for (ssize_t i = num - kThreshold / 2; i < num; i++) {
+          ss << '\n';
+          ss << std::setw(depth + 1) << ' ';  // Add the indent.
+          SummaryStringRecursive(ss, cursor, depth + 1);
+        }
+      }
     }
     ss << ']';
-    return ss.str();
   }

- private:
   size_t ndim_{0};
   size_t data_size_{0};
   std::vector<T> data_;
+  std::vector<int> shape_;
 };
 template <typename... Args>
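The only subtle part of SummaryStringRecursive above is the cursor bookkeeping: when a middle dimension is elided, the flat cursor must still skip every element of the hidden sub-tensors, otherwise the trailing rows would print the wrong slice of data_. A worked example of that arithmetic, assuming shape {10, 5} and kThreshold == 6:

```cpp
#include <iostream>
#include <vector>

int main() {
  const std::vector<int> shape{10, 5};
  const long kThreshold = 6;
  const long depth = 0;             // summarizing the outermost dimension
  long ignored = shape[depth + 1];  // elements per skipped row: 5
  // (for deeper shapes this keeps multiplying the remaining dimensions)
  ignored *= shape[depth] - kThreshold;  // 10 - 6 = 4 rows are hidden
  std::cout << ignored << std::endl;     // 20: the cursor jumps past them
}
```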
@@ -297,7 +404,7 @@ std::string Tensor::ToString() const {
   buf << "Tensor shape:[" << shape() << "]" << this->Dtype()->ToString();
   // only print small tensor
   if (DataSize() < small_tensor_size) {
-    buf << "val:" << data().ToString();
+    buf << ", value:" << data().ToString();
   }
   return buf.str();
 }
@@ -307,10 +414,20 @@ std::string Tensor::ToStringRepr() const {
   auto type_ptr = this->Dtype();
   MS_EXCEPTION_IF_NULL(type_ptr);
   buf << "Tensor shape:[" << shape() << "]" << type_ptr->ToString();
-  buf << "\nval:" << data().ToString();
+  buf << "\nvalue:" << data().ToString();
   return buf.str();
 }

+std::string Tensor::ToStringSafe() {
+  data().CheckDataSafe();
+  return ToString();
+}
+
+std::string Tensor::ToStringReprSafe() {
+  data().CheckDataSafe();
+  return ToStringRepr();
+}
+
 void Tensor::data_sync() const {
   if (device_address_ != nullptr) {
     if (!device_address_->SyncDeviceToHost(shape(), static_cast<size_t>(data().nbytes()), data_type(), data_c())) {
...
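The Safe wrappers exist because CheckDataSafe() mutates data_, so it cannot be reached from the const ToString() path; routing callers through the wrappers means printing a lazily allocated tensor materializes its buffer instead of hitting the "data_ is empty" error path. A reduced sketch of the const/non-const split, with hypothetical names:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical reduction: a const ToString() cannot allocate, so a non-const
// wrapper materializes the buffer first and then delegates.
struct Printable {
  std::vector<int> data_;
  size_t data_size_{4};

  std::string ToString() const {  // const: must not resize data_
    return data_.empty() ? "<unallocated>" : "<ok>";
  }
  std::string ToStringSafe() {  // non-const: may resize
    if (data_.empty()) {
      data_.resize(data_size_);
    }
    return ToString();
  }
};

int main() {
  Printable p;
  // p.ToString() alone would report "<unallocated>".
  return p.ToStringSafe() == "<ok>" ? 0 : 1;
}
```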
@@ -54,8 +54,14 @@ class TensorData {
   virtual ssize_t ndim() const = 0;
   /// Data pointer.
   virtual void *data() = 0;
+  /// Shape of data.
+  virtual std::vector<int> shape() const = 0;
   /// Is data equals.
   virtual bool equals(const TensorData &other) const = 0;
+  /// Check for lazy allocation.
+  virtual void CheckDataSafe() = 0;
+  /// To string for lazy allocation.
+  virtual std::string ToStringSafe() = 0;
   /// To string.
   virtual std::string ToString() const = 0;
 };
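Since shape(), CheckDataSafe() and ToStringSafe() are added as pure virtuals, every TensorData implementation must now provide them; any subclass written before this change will no longer compile. A minimal conforming stub, hypothetical and for illustration only:

```cpp
#include <string>
#include <vector>

// Trimmed copy of the interface above, plus a minimal conforming stub.
class TensorData {
 public:
  virtual ~TensorData() = default;
  virtual std::vector<int> shape() const = 0;
  virtual void CheckDataSafe() = 0;
  virtual std::string ToStringSafe() = 0;
  virtual std::string ToString() const = 0;
};

class StubData : public TensorData {
 public:
  std::vector<int> shape() const override { return {}; }
  void CheckDataSafe() override {}  // nothing lazy to materialize here
  std::string ToStringSafe() override { return ToString(); }
  std::string ToString() const override { return "[]"; }
};

int main() {
  StubData d;
  return d.ToStringSafe() == "[]" ? 0 : 1;
}
```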
@@ -180,7 +186,6 @@ class Tensor : public MetaTensor {
   // brief Get Tensor data pointer for c++ type
   //
-  // param writable true if writable, false if read only
   // return The pointer to the object
   void *data_c() { return data().data(); }
@@ -217,6 +222,12 @@ class Tensor : public MetaTensor {
   std::string ToStringRepr() const;

+  /// To string for lazy allocation.
+  std::string ToStringSafe();
+
+  /// To string repr for lazy allocation.
+  std::string ToStringReprSafe();
+
   bool is_init() { return init_flag_; }
   void set_init_flag(bool flag) { init_flag_ = flag; }
...
@@ -351,8 +351,8 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
                          >>> data.set_dtype(mindspore.int32)
                          mindspore.int32
                          )mydelimiter")
-                        .def("__str__", &Tensor::ToString)
-                        .def("__repr__", &Tensor::ToStringRepr)
+                        .def("__str__", &Tensor::ToStringSafe)
+                        .def("__repr__", &Tensor::ToStringReprSafe)
                         .def(py::pickle(
                           [](const Tensor &t) {  // __getstate__
                             /* Return a tuple that fully encodes the state of the object */
...
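Binding __str__ and __repr__ to the Safe variants means print(tensor) from Python always sees an allocated buffer. A standalone pybind11 sketch of the same binding pattern with a toy class (assumption: pybind11 is available; this is not MindSpore's actual REGISTER_PYBIND_DEFINE machinery):

```cpp
#include <pybind11/pybind11.h>
#include <string>

namespace py = pybind11;

// Toy stand-in: __str__/__repr__ delegate to members that may mutate state.
struct Toy {
  bool allocated{false};
  std::string ToStringSafe() {
    allocated = true;  // mirror the lazy allocation done by the real Tensor
    return "Toy()";
  }
  std::string ToStringReprSafe() {
    allocated = true;
    return "Toy(<repr>)";
  }
};

PYBIND11_MODULE(toy, m) {
  py::class_<Toy>(m, "Toy")
    .def(py::init<>())
    .def("__str__", &Toy::ToStringSafe)
    .def("__repr__", &Toy::ToStringReprSafe);
}
```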