Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
2c3c579b
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
2c3c579b
编写于
11月 01, 2019
作者:
L
Leo Chen
提交者:
Zeng Jinle
11月 01, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
tensor.set() supports array list and remove unused code, test=develop (#20959)
上级
eec4fa90
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
44 additions
and
117 deletions
+44
-117
paddle/fluid/pybind/tensor_py.h
paddle/fluid/pybind/tensor_py.h
+5
-117
python/paddle/fluid/tests/unittests/test_tensor.py
python/paddle/fluid/tests/unittests/test_tensor.py
+39
-0
未找到文件。
paddle/fluid/pybind/tensor_py.h
浏览文件 @
2c3c579b
...
...
@@ -140,7 +140,8 @@ void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
template
<
typename
T
,
typename
P
>
void
SetTensorFromPyArrayT
(
framework
::
Tensor
*
self
,
py
::
array_t
<
T
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
P
place
)
{
const
py
::
array_t
<
T
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
&
array
,
const
P
&
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
for
(
decltype
(
array
.
ndim
())
i
=
0
;
i
<
array
.
ndim
();
++
i
)
{
...
...
@@ -171,8 +172,9 @@ void SetTensorFromPyArrayT(
}
template
<
typename
P
>
void
SetTensorFromPyArray
(
framework
::
Tensor
*
self
,
pybind11
::
array
array
,
P
place
)
{
void
SetTensorFromPyArray
(
framework
::
Tensor
*
self
,
const
py
::
object
&
obj
,
const
P
&
place
)
{
auto
array
=
obj
.
cast
<
py
::
array
>
();
if
(
py
::
isinstance
<
py
::
array_t
<
float
>>
(
array
))
{
SetTensorFromPyArrayT
<
float
,
P
>
(
self
,
array
,
place
);
}
else
if
(
py
::
isinstance
<
py
::
array_t
<
int
>>
(
array
))
{
...
...
@@ -202,42 +204,6 @@ void SetTensorFromPyArray(framework::Tensor *self, pybind11::array array,
}
}
// Copy a dense (c_style | forcecast => contiguous) numpy array of element
// type T into a CPU tensor: resize the tensor to the array's shape, then
// copy the whole buffer with one memcpy.
//
// @param self   destination tensor; resized in place.
// @param array  source numpy array; pybind11 guarantees a contiguous buffer.
// @param place  CPU place used for the destination allocation.
template <typename T>
void PyCPUTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CPUPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t: going through int would silently truncate
    // any dimension larger than INT_MAX even though dims stores int64_t.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}
template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16: the raw 16-bit payload of the numpy float16 array is
// copied verbatim into a float16 tensor (both are 2 bytes wide).
inline void PyCPUTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CPUPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t to avoid truncating dims larger than INT_MAX.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // sizeof(uint16_t) == sizeof(platform::float16); bit pattern is reused.
  std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
}
template
<
typename
T
,
size_t
D
>
void
_sliceCompute
(
const
framework
::
Tensor
*
in
,
framework
::
Tensor
*
out
,
const
platform
::
CPUDeviceContext
&
ctx
,
...
...
@@ -485,84 +451,6 @@ inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
}
}
#ifdef PADDLE_WITH_CUDA
// Copy a dense numpy array of element type T into a CUDA (device) tensor:
// resize to the array's shape, then do one synchronous host-to-device copy.
//
// @param self   destination tensor; resized in place.
// @param array  source numpy array; pybind11 guarantees a contiguous buffer.
// @param place  CUDA device place used for the destination allocation.
template <typename T>
void PyCUDATensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CUDAPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t to avoid truncating dims larger than INT_MAX.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  // Synchronous copy: the numpy buffer may be freed as soon as we return.
  paddle::platform::GpuMemcpySync(dst, array.data(),
                                  sizeof(T) * array.size(),
                                  cudaMemcpyHostToDevice);
}
template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16: the raw 16-bit payload of the numpy float16 array is
// copied verbatim to the device as float16 data.
inline void PyCUDATensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CUDAPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t to avoid truncating dims larger than INT_MAX.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // Synchronous host-to-device copy; uint16_t and float16 are both 2 bytes.
  paddle::platform::GpuMemcpySync(dst, array.data(),
                                  sizeof(uint16_t) * array.size(),
                                  cudaMemcpyHostToDevice);
}
// Copy a dense numpy array of element type T into a CUDA-pinned (page-locked
// host memory) tensor: resize to the array's shape, then memcpy — pinned
// memory is host-addressable, so no cudaMemcpy is needed.
//
// @param self   destination tensor; resized in place.
// @param array  source numpy array; pybind11 guarantees a contiguous buffer.
// @param place  CUDA-pinned place used for the destination allocation.
template <typename T>
void PyCUDAPinnedTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    const paddle::platform::CUDAPinnedPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t to avoid truncating dims larger than INT_MAX.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}
template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16: the raw 16-bit payload of the numpy float16 array is
// copied verbatim into a float16 tensor in pinned host memory.
inline void PyCUDAPinnedTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    const paddle::platform::CUDAPinnedPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Cast straight to int64_t to avoid truncating dims larger than INT_MAX.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // sizeof(uint16_t) == sizeof(platform::float16); bit pattern is reused.
  std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
}
#endif
inline
py
::
array
TensorToPyArray
(
const
framework
::
Tensor
&
tensor
)
{
if
(
!
tensor
.
IsInitialized
())
{
return
py
::
array
();
...
...
python/paddle/fluid/tests/unittests/test_tensor.py
浏览文件 @
2c3c579b
...
...
@@ -280,6 +280,45 @@ class TestTensor(unittest.TestCase):
isinstance
(
tensor
.
_mutable_data
(
places
[
0
],
dtype
),
numbers
.
Integral
))
def test_tensor_set_fp16(self):
    """tensor.set() with a float16 ndarray keeps the FP16 dtype and values."""
    array = numpy.random.random((300, 500)).astype("float16")
    tensor = fluid.Tensor()

    # The same tensor is re-set on every available place (CPU always;
    # CUDA device and CUDA-pinned only when compiled with CUDA), and on
    # each of them the dtype and round-tripped contents must match.
    places = [core.CPUPlace()]
    if core.is_compiled_with_cuda():
        places.append(core.CUDAPlace(0))
        places.append(core.CUDAPinnedPlace())

    for place in places:
        tensor.set(array, place)
        self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
        self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
def test_tensor_set_from_array_list(self):
    """tensor.set() accepts a list of ndarrays, stacked along a new dim 0."""
    array = numpy.random.randint(1000, size=(200, 300))
    list_array = [array, array]
    tensor = fluid.Tensor()

    # Re-set the same tensor on every available place; the resulting shape
    # must gain a leading dimension of len(list_array) and the contents
    # must round-trip exactly.
    places = [core.CPUPlace()]
    if core.is_compiled_with_cuda():
        places.append(core.CUDAPlace(0))
        places.append(core.CUDAPinnedPlace())

    for place in places:
        tensor.set(list_array, place)
        self.assertEqual([2, 200, 300], tensor.shape())
        self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array))
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录