Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
6124a4ed
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
6124a4ed
编写于
7月 06, 2020
作者:
M
mindspore-ci-bot
提交者:
Gitee
7月 06, 2020
浏览文件
操作
浏览文件
下载
差异文件
!2859 Print tensor as numpy.
Merge pull request !2859 from ZhangQinghua/master
上级
48e9d60a
e377c966
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
146 additions
and
18 deletions
+146
-18
mindspore/ccsrc/ir/tensor.cc
mindspore/ccsrc/ir/tensor.cc
+132
-15
mindspore/ccsrc/ir/tensor.h
mindspore/ccsrc/ir/tensor.h
+12
-1
mindspore/ccsrc/ir/tensor_py.cc
mindspore/ccsrc/ir/tensor_py.cc
+2
-2
未找到文件。
mindspore/ccsrc/ir/tensor.cc
浏览文件 @
6124a4ed
...
...
@@ -23,12 +23,18 @@
#include <sstream>
#include <string>
#include <utility>
#include <iomanip>
#include <algorithm>
#include <type_traits>
#include <typeinfo>
#include "device/device_address.h"
#include "pipeline/static_analysis/abstract_value.h"
namespace
mindspore
{
namespace
tensor
{
constexpr
auto
kEllipsis
=
"..."
;
constexpr
auto
kThreshold
=
6
;
static
std
::
string
MakeId
()
{
// Use atomic to make id generator thread safe.
...
...
@@ -114,21 +120,22 @@ std::vector<T> CopyData(const std::vector<int> &shape, void *data, size_t data_l
template
<
typename
T
>
class
TensorDataImpl
:
public
TensorData
{
public:
explicit
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
))
{}
explicit
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
shape_
(
shape
)
{}
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
,
void
*
data
,
size_t
data_len
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
CopyData
<
T
>
(
shape
,
data
,
data_len
))
{}
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
CopyData
<
T
>
(
shape
,
data
,
data_len
))
,
shape_
(
shape
)
{}
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
,
void
*
data
,
TypeId
data_type
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
CopyData
<
T
>
(
shape
,
data
,
data_type
))
{}
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
CopyData
<
T
>
(
shape
,
data
,
data_type
))
,
shape_
(
shape
)
{}
template
<
typename
InputIt
>
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
,
InputIt
first
,
InputIt
last
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
first
,
last
)
{}
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
(
first
,
last
)
,
shape_
(
shape
)
{}
template
<
typename
Scalar
>
TensorDataImpl
(
const
std
::
vector
<
int
>
&
shape
,
Scalar
scalar
)
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
({
static_cast
<
T
>
(
scalar
)})
{}
:
ndim_
(
shape
.
size
()),
data_size_
(
SizeOf
(
shape
)),
data_
({
static_cast
<
T
>
(
scalar
)})
,
shape_
(
shape
)
{}
/// Total number of elements this tensor holds (product of shape extents).
ssize_t size() const override {
  return static_cast<ssize_t>(data_size_);
}
...
...
@@ -144,13 +151,12 @@ class TensorDataImpl : public TensorData {
// Prevent null pointer for empty shape.
return
empty_data
.
data
();
}
if
(
data_
.
empty
())
{
// Lazy allocation.
data_
.
resize
(
data_size_
);
}
CheckDataSafe
();
return
data_
.
data
();
}
std
::
vector
<
int
>
shape
()
const
{
return
shape_
;
}
bool
equals
(
const
TensorData
&
other
)
const
override
{
auto
ptr
=
dynamic_cast
<
const
TensorDataImpl
<
T
>
*>
(
&
other
);
if
(
ptr
)
{
...
...
@@ -159,20 +165,121 @@ class TensorDataImpl : public TensorData {
return
false
;
}
/// Make sure the backing buffer exists before raw access (lazy allocation).
void CheckDataSafe() {
  if (!data_.empty()) {
    return;  // Already allocated; nothing to do.
  }
  // First touch: materialize the buffer at its full element count.
  data_.resize(data_size_);
}
// ToString() for lazy allocation.
std
::
string
ToStringSafe
()
{
CheckDataSafe
();
return
ToString
();
}
std
::
string
ToString
()
const
override
{
constexpr
auto
valid
=
std
::
is_same
<
T
,
Bool
>::
value
||
std
::
is_same
<
T
,
uint8_t
>::
value
||
std
::
is_same
<
T
,
int8_t
>::
value
||
std
::
is_same
<
T
,
int16_t
>::
value
||
std
::
is_same
<
T
,
int32_t
>::
value
||
std
::
is_same
<
T
,
int64_t
>::
value
||
std
::
is_same
<
T
,
uint16_t
>::
value
||
std
::
is_same
<
T
,
uint32_t
>::
value
||
std
::
is_same
<
T
,
uint64_t
>::
value
||
std
::
is_same
<
T
,
float16
>::
value
||
std
::
is_same
<
T
,
float
>::
value
||
std
::
is_same
<
T
,
double
>::
value
;
if
(
!
valid
)
{
MS_LOG
(
EXCEPTION
)
<<
"Type is invalid, T: "
<<
typeid
(
T
).
name
();
}
if
(
data_size_
==
0
)
{
return
""
;
}
if
(
data_
.
empty
())
{
MS_LOG
(
ERROR
)
<<
"data_ is empty, data_size_: "
<<
data_size_
;
return
""
;
}
std
::
ostringstream
ss
;
ssize_t
cursor
=
0
;
SummaryStringRecursive
(
ss
,
&
cursor
,
0
);
return
ss
.
str
();
}
private:
void
OutputDataString
(
std
::
ostringstream
&
ss
,
ssize_t
cursor
,
ssize_t
start
,
ssize_t
end
)
const
{
constexpr
auto
isFloat
=
std
::
is_same
<
T
,
float16
>::
value
||
std
::
is_same
<
T
,
float
>::
value
||
std
::
is_same
<
T
,
double
>::
value
;
constexpr
auto
isSigned
=
std
::
is_same
<
T
,
int8_t
>::
value
||
std
::
is_same
<
T
,
int16_t
>::
value
||
std
::
is_same
<
T
,
int32_t
>::
value
||
std
::
is_same
<
T
,
int64_t
>::
value
;
for
(
ssize_t
i
=
start
;
i
<
end
&&
(
cursor
+
i
)
<
static_cast
<
ssize_t
>
(
data_size_
);
i
++
)
{
if
(
isFloat
)
{
ss
<<
std
::
setw
(
15
)
<<
std
::
setprecision
(
8
)
<<
std
::
setiosflags
(
std
::
ios
::
scientific
|
std
::
ios
::
right
)
<<
data_
[
cursor
+
i
];
}
else
{
if
(
isSigned
&&
static_cast
<
int64_t
>
(
data_
[
cursor
+
i
])
>=
0
)
{
ss
<<
' '
;
}
ss
<<
data_
[
cursor
+
i
];
}
if
(
i
!=
end
-
1
)
{
ss
<<
' '
;
}
}
}
/// Recursively build the numpy-style summary for the sub-tensor at `depth`,
/// advancing *cursor (the flat element index) past everything printed or
/// skipped. Dimensions longer than kThreshold are abbreviated with
/// kEllipsis.
///
/// Fixes vs. the scraped original: removes a stray, unterminated
/// `for (auto value : data_)` loop (leftover of the deleted pre-summary
/// printing code that the diff view merged in) and a trailing
/// `return ss.str();` — illegal in a void function — replacing it with a
/// plain return after closing the bracket.
void SummaryStringRecursive(std::ostringstream &ss, ssize_t *cursor, ssize_t depth) const {
  if (depth >= static_cast<ssize_t>(ndim_)) {
    return;
  }
  ss << '[';
  if (depth == static_cast<ssize_t>(ndim_) - 1) {
    // Bottom dimension: print the elements themselves.
    ssize_t num = shape_[depth];
    if (num > kThreshold) {
      OutputDataString(ss, *cursor, 0, kThreshold / 2);
      ss << ' ' << kEllipsis << ' ';
      OutputDataString(ss, *cursor, num - kThreshold / 2, num);
    } else {
      OutputDataString(ss, *cursor, 0, num);
    }
    *cursor += num;
  } else {
    // Middle dimension: recurse into child slices.
    ssize_t num = shape_[depth];
    // Handle the first half.
    for (ssize_t i = 0; i < std::min(static_cast<ssize_t>(kThreshold / 2), num); i++) {
      if (i > 0) {
        ss << '\n';
        ss << std::setw(depth + 1) << ' ';  // Add the indent.
      }
      SummaryStringRecursive(ss, cursor, depth + 1);
    }
    // Handle the ignored part.
    if (num > kThreshold) {
      ss << '\n';
      ss << std::setw(depth + 1) << ' ';  // Add the indent.
      ss << kEllipsis << '\n';
      // Count the elements skipped at this layer so cursor stays in sync.
      ssize_t ignored = shape_[depth + 1];
      for (ssize_t i = depth + 2; i < static_cast<ssize_t>(ndim_); i++) {
        ignored *= shape_[i];
      }
      // Multiply by the number of ignored slices in this dimension.
      ignored *= num - kThreshold;
      *cursor += ignored;
    }
    // Handle the second half.
    if (num > kThreshold / 2) {
      for (ssize_t i = num - kThreshold / 2; i < num; i++) {
        ss << '\n';
        ss << std::setw(depth + 1) << ' ';  // Add the indent.
        SummaryStringRecursive(ss, cursor, depth + 1);
      }
    }
  }
  ss << ']';
}
private:
size_t
ndim_
{
0
};
size_t
data_size_
{
0
};
std
::
vector
<
T
>
data_
;
std
::
vector
<
int
>
shape_
;
};
template
<
typename
...
Args
>
...
...
@@ -297,7 +404,7 @@ std::string Tensor::ToString() const {
buf
<<
"Tensor shape:["
<<
shape
()
<<
"]"
<<
this
->
Dtype
()
->
ToString
();
// only print small tensor
if
(
DataSize
()
<
small_tensor_size
)
{
buf
<<
"
val
:"
<<
data
().
ToString
();
buf
<<
"
, value
:"
<<
data
().
ToString
();
}
return
buf
.
str
();
}
...
...
@@ -307,10 +414,20 @@ std::string Tensor::ToStringRepr() const {
auto
type_ptr
=
this
->
Dtype
();
MS_EXCEPTION_IF_NULL
(
type_ptr
);
buf
<<
"Tensor shape:["
<<
shape
()
<<
"]"
<<
type_ptr
->
ToString
();
buf
<<
"
\n
val:"
<<
data
().
ToString
();
buf
<<
"
\n
val
ue
:"
<<
data
().
ToString
();
return
buf
.
str
();
}
/// ToString() with lazy allocation forced first, so printing never sees an
/// unallocated host buffer.
std::string Tensor::ToStringSafe() {
  data().CheckDataSafe();
  return ToString();
}
/// ToStringRepr() with lazy allocation forced first; repr counterpart of
/// ToStringSafe().
std::string Tensor::ToStringReprSafe() {
  data().CheckDataSafe();
  return ToStringRepr();
}
void
Tensor
::
data_sync
()
const
{
if
(
device_address_
!=
nullptr
)
{
if
(
!
device_address_
->
SyncDeviceToHost
(
shape
(),
static_cast
<
size_t
>
(
data
().
nbytes
()),
data_type
(),
data_c
()))
{
...
...
mindspore/ccsrc/ir/tensor.h
浏览文件 @
6124a4ed
...
...
@@ -54,8 +54,14 @@ class TensorData {
virtual
ssize_t
ndim
()
const
=
0
;
/// Data pointer.
virtual
void
*
data
()
=
0
;
/// Shape of data.
virtual
std
::
vector
<
int
>
shape
()
const
=
0
;
/// Is data equals.
virtual
bool
equals
(
const
TensorData
&
other
)
const
=
0
;
/// Check for lazy allocation.
virtual
void
CheckDataSafe
()
=
0
;
/// To string for lazy allocation.
virtual
std
::
string
ToStringSafe
()
=
0
;
/// To string.
virtual
std
::
string
ToString
()
const
=
0
;
};
...
...
@@ -180,7 +186,6 @@ class Tensor : public MetaTensor {
// brief Get Tensor data pointer for c++ type
//
// param writable true if writable, false if read only
// return The pointer to the object
void
*
data_c
()
{
return
data
().
data
();
}
...
...
@@ -217,6 +222,12 @@ class Tensor : public MetaTensor {
std
::
string
ToStringRepr
()
const
;
/// To string for lazy allocation.
std
::
string
ToStringSafe
();
/// To string for lazy allocation.
std
::
string
ToStringReprSafe
();
bool
is_init
()
{
return
init_flag_
;
}
void
set_init_flag
(
bool
flag
)
{
init_flag_
=
flag
;
}
...
...
mindspore/ccsrc/ir/tensor_py.cc
浏览文件 @
6124a4ed
...
...
@@ -351,8 +351,8 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
>>> data.set_dtype(mindspore.int32)
mindspore.int32
)mydelimiter"
)
.
def
(
"__str__"
,
&
Tensor
::
ToString
)
.
def
(
"__repr__"
,
&
Tensor
::
ToStringRepr
)
.
def
(
"__str__"
,
&
Tensor
::
ToString
Safe
)
.
def
(
"__repr__"
,
&
Tensor
::
ToStringRepr
Safe
)
.
def
(
py
::
pickle
(
[](
const
Tensor
&
t
)
{
// __getstate__
/* Return a tuple that fully encodes the state of the object */
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录