Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
1543c4cf
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
1543c4cf
编写于
4月 06, 2018
作者:
Y
Yi Wang
提交者:
GitHub
4月 06, 2018
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Fix cpplint errors of paddle/fluid/pybind and add some tests (#9694)
* cpplint test and add tesnor_py_test.cc * Update * Update
上级
6ba26257
变更
12
隐藏空白更改
内联
并排
Showing
12 changed file
with
291 addition
and
218 deletion
+291
-218
paddle/fluid/pybind/CMakeLists.txt
paddle/fluid/pybind/CMakeLists.txt
+2
-0
paddle/fluid/pybind/const_value.cc
paddle/fluid/pybind/const_value.cc
+6
-6
paddle/fluid/pybind/const_value.h
paddle/fluid/pybind/const_value.h
+5
-4
paddle/fluid/pybind/exception.cc
paddle/fluid/pybind/exception.cc
+4
-3
paddle/fluid/pybind/exception.h
paddle/fluid/pybind/exception.h
+5
-2
paddle/fluid/pybind/protobuf.cc
paddle/fluid/pybind/protobuf.cc
+143
-135
paddle/fluid/pybind/protobuf.h
paddle/fluid/pybind/protobuf.h
+7
-7
paddle/fluid/pybind/pybind.cc
paddle/fluid/pybind/pybind.cc
+7
-7
paddle/fluid/pybind/recordio.cc
paddle/fluid/pybind/recordio.cc
+10
-2
paddle/fluid/pybind/recordio.h
paddle/fluid/pybind/recordio.h
+2
-1
paddle/fluid/pybind/tensor_py.h
paddle/fluid/pybind/tensor_py.h
+56
-51
paddle/fluid/pybind/tensor_py_test.cc
paddle/fluid/pybind/tensor_py_test.cc
+44
-0
未找到文件。
paddle/fluid/pybind/CMakeLists.txt
浏览文件 @
1543c4cf
...
...
@@ -15,4 +15,6 @@ if(WITH_PYTHON)
target_link_libraries
(
paddle_pybind rt
)
endif
(
NOT APPLE AND NOT ANDROID
)
endif
(
WITH_AMD_GPU
)
cc_test
(
tensor_py_test SRCS tensor_py_test.cc DEPS python
)
endif
(
WITH_PYTHON
)
paddle/fluid/pybind/const_value.cc
浏览文件 @
1543c4cf
...
...
@@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "const_value.h"
#include "
paddle/fluid/pybind/
const_value.h"
#include "paddle/fluid/framework/operator.h"
namespace
paddle
{
namespace
pybind
{
void
BindConstValue
(
pybind11
::
module
&
m
)
{
m
.
def
(
"kEmptyVarName"
,
[]
{
return
framework
::
kEmptyVarName
;
});
m
.
def
(
"kTempVarName"
,
[]
{
return
framework
::
kTempVarName
;
});
m
.
def
(
"kGradVarSuffix"
,
[]
{
return
framework
::
kGradVarSuffix
;
});
m
.
def
(
"kZeroVarSuffix"
,
[]
{
return
framework
::
kZeroVarSuffix
;
});
void
BindConstValue
(
pybind11
::
module
*
m
)
{
m
->
def
(
"kEmptyVarName"
,
[]
{
return
framework
::
kEmptyVarName
;
});
m
->
def
(
"kTempVarName"
,
[]
{
return
framework
::
kTempVarName
;
});
m
->
def
(
"kGradVarSuffix"
,
[]
{
return
framework
::
kGradVarSuffix
;
});
m
->
def
(
"kZeroVarSuffix"
,
[]
{
return
framework
::
kZeroVarSuffix
;
});
}
}
// namespace pybind
...
...
paddle/fluid/pybind/const_value.h
浏览文件 @
1543c4cf
...
...
@@ -11,16 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <Python.h>
#include "paddle/fluid/platform/enforce.h"
#include "pybind11/pybind11.h"
namespace
py
=
pybind11
;
namespace
paddle
{
namespace
pybind
{
extern
void
BindConstValue
(
pybind11
::
module
&
m
);
void
BindConstValue
(
pybind11
::
module
*
m
);
}
// namespace pybind
}
// namespace paddle
paddle/fluid/pybind/exception.cc
浏览文件 @
1543c4cf
...
...
@@ -17,8 +17,8 @@ limitations under the License. */
namespace
paddle
{
namespace
pybind
{
void
BindException
(
pybind11
::
module
&
m
)
{
static
pybind11
::
exception
<
platform
::
EnforceNotMet
>
exc
(
m
,
"EnforceNotMet"
);
void
BindException
(
pybind11
::
module
*
m
)
{
static
pybind11
::
exception
<
platform
::
EnforceNotMet
>
exc
(
*
m
,
"EnforceNotMet"
);
pybind11
::
register_exception_translator
([](
std
::
exception_ptr
p
)
{
try
{
if
(
p
)
std
::
rethrow_exception
(
p
);
...
...
@@ -27,7 +27,8 @@ void BindException(pybind11::module& m) {
}
});
m
.
def
(
"__unittest_throw_exception__"
,
[]
{
PADDLE_THROW
(
"test exception"
);
});
m
->
def
(
"__unittest_throw_exception__"
,
[]
{
PADDLE_THROW
(
"test exception"
);
});
}
}
// namespace pybind
...
...
paddle/fluid/pybind/exception.h
浏览文件 @
1543c4cf
...
...
@@ -11,14 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <Python.h>
#include "paddle/fluid/platform/enforce.h"
#include "pybind11/pybind11.h"
namespace
paddle
{
namespace
pybind
{
extern
void
BindException
(
pybind11
::
module
&
m
);
void
BindException
(
pybind11
::
module
*
m
);
}
// namespace pybind
}
// namespace paddle
paddle/fluid/pybind/protobuf.cc
浏览文件 @
1543c4cf
...
...
@@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/pybind/protobuf.h"
#include <deque>
#include <iostream>
#include <string>
#include <tuple>
#include "paddle/fluid/framework/backward.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
...
...
@@ -97,10 +98,11 @@ struct type_caster<boost::variant<Args...>>
namespace
paddle
{
namespace
pybind
{
using
namespace
paddle
::
framework
;
// NOLINT
namespace
pd
=
paddle
::
framework
;
template
<
typename
T
>
static
py
::
bytes
SerializeMessage
(
T
&
self
)
{
// NOLINT
static
pybind11
::
bytes
SerializeMessage
(
T
&
self
)
{
// NOLINT due to pybind11 convention.
// Check IsInitialized in Python
std
::
string
retv
;
PADDLE_ENFORCE
(
self
.
Proto
()
->
SerializePartialToString
(
&
retv
),
...
...
@@ -109,24 +111,24 @@ static py::bytes SerializeMessage(T &self) { // NOLINT
}
// Bind Methods
void
BindProgramDesc
(
py
::
module
&
m
)
{
// NOLINT
py
::
class_
<
ProgramDesc
>
(
m
,
"ProgramDesc"
,
""
)
.
def
(
py
::
init
<>
())
void
BindProgramDesc
(
py
bind11
::
module
*
m
)
{
py
bind11
::
class_
<
pd
::
ProgramDesc
>
(
*
m
,
"ProgramDesc"
,
""
)
.
def
(
py
bind11
::
init
<>
())
.
def
(
"__init__"
,
[](
ProgramDesc
&
self
,
const
ProgramDesc
&
other
)
{
new
(
&
self
)
ProgramDesc
(
other
);
[](
pd
::
ProgramDesc
&
self
,
const
pd
::
ProgramDesc
&
other
)
{
new
(
&
self
)
pd
::
ProgramDesc
(
other
);
})
.
def
(
"__init__"
,
[](
ProgramDesc
&
self
,
const
py
::
bytes
&
binary_str
)
{
[](
pd
::
ProgramDesc
&
self
,
const
pybind11
::
bytes
&
binary_str
)
{
std
::
string
str
(
binary_str
);
new
(
&
self
)
ProgramDesc
(
str
);
new
(
&
self
)
pd
::
ProgramDesc
(
str
);
})
.
def
(
"append_block"
,
&
ProgramDesc
::
AppendBlock
,
py
::
return_value_policy
::
reference
)
.
def
(
"append_block"
,
&
pd
::
ProgramDesc
::
AppendBlock
,
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"append_backward"
,
[](
ProgramDesc
&
program_desc
,
const
VarDesc
&
target
,
[](
pd
::
ProgramDesc
&
program_desc
,
const
pd
::
VarDesc
&
target
,
const
std
::
unordered_set
<
std
::
string
>
&
no_grad_vars
)
{
ParamGradInfoMap
param_grad_map
=
pd
::
ParamGradInfoMap
param_grad_map
=
AppendBackward
(
program_desc
,
target
,
no_grad_vars
);
std
::
unordered_map
<
std
::
string
,
std
::
tuple
<
std
::
string
/* grad_var_name */
,
...
...
@@ -140,178 +142,184 @@ void BindProgramDesc(py::module &m) { // NOLINT
}
return
retv
;
})
.
def
(
"block"
,
&
ProgramDesc
::
MutableBlock
,
py
::
return_value_policy
::
reference
)
.
def
(
"num_blocks"
,
&
ProgramDesc
::
Size
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
ProgramDesc
>
)
.
def
(
"block"
,
&
pd
::
ProgramDesc
::
MutableBlock
,
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"num_blocks"
,
&
pd
::
ProgramDesc
::
Size
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
pd
::
ProgramDesc
>
)
.
def
(
"parse_from_string"
,
[](
ProgramDesc
&
program_desc
,
const
std
::
string
&
data
)
{
proto
::
ProgramDesc
*
desc
=
program_desc
.
Proto
();
[](
pd
::
ProgramDesc
&
program_desc
,
const
std
::
string
&
data
)
{
p
d
::
p
roto
::
ProgramDesc
*
desc
=
program_desc
.
Proto
();
PADDLE_ENFORCE
(
desc
->
ParseFromString
(
data
),
"Fail to parse ProgramDesc from string. This could "
"be a bug of Paddle."
);
});
}
void
BindBlockDesc
(
py
::
module
&
m
)
{
// NOLINT
py
::
class_
<
BlockDesc
>
(
m
,
"BlockDesc"
,
""
)
.
def_property_readonly
(
"id"
,
&
BlockDesc
::
ID
)
.
def_property_readonly
(
"parent"
,
&
BlockDesc
::
Parent
)
.
def
(
"get_forward_block_idx"
,
&
BlockDesc
::
ForwardBlockID
)
.
def
(
"set_forward_block_idx"
,
&
BlockDesc
::
SetForwardBlockID
)
.
def
(
"append_op"
,
&
BlockDesc
::
AppendOp
,
py
::
return_value_policy
::
reference
)
.
def
(
"prepend_op"
,
&
BlockDesc
::
PrependOp
,
py
::
return_value_policy
::
reference
)
.
def
(
"insert_op"
,
&
BlockDesc
::
InsertOp
,
py
::
return_value_policy
::
reference
)
.
def
(
"remove_op"
,
&
BlockDesc
::
RemoveOp
)
void
BindBlockDesc
(
py
bind11
::
module
*
m
)
{
py
bind11
::
class_
<
pd
::
BlockDesc
>
(
*
m
,
"BlockDesc"
,
""
)
.
def_property_readonly
(
"id"
,
&
pd
::
BlockDesc
::
ID
)
.
def_property_readonly
(
"parent"
,
&
pd
::
BlockDesc
::
Parent
)
.
def
(
"get_forward_block_idx"
,
&
pd
::
BlockDesc
::
ForwardBlockID
)
.
def
(
"set_forward_block_idx"
,
&
pd
::
BlockDesc
::
SetForwardBlockID
)
.
def
(
"append_op"
,
&
pd
::
BlockDesc
::
AppendOp
,
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"prepend_op"
,
&
pd
::
BlockDesc
::
PrependOp
,
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"insert_op"
,
&
pd
::
BlockDesc
::
InsertOp
,
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"remove_op"
,
&
pd
::
BlockDesc
::
RemoveOp
)
.
def
(
"var"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
Var
(
name
);
},
py
::
return_value_policy
::
reference
)
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"has_var"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
HasVar
(
name
);
},
py
::
return_value_policy
::
reference
)
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"rename_var"
,
[](
BlockDesc
&
self
,
const
py
::
bytes
&
byte_name
,
const
py
::
bytes
&
byte_name_new
)
{
[](
pd
::
BlockDesc
&
self
,
const
pybind11
::
bytes
&
byte_name
,
const
py
bind11
::
bytes
&
byte_name_new
)
{
std
::
string
name
=
byte_name
;
std
::
string
new_name
=
byte_name_new
;
self
.
RenameVar
(
name
,
new_name
);
})
.
def
(
"has_var_recursive"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
HasVarRecursive
(
name
);
})
.
def
(
"find_var"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
FindVar
(
name
);
},
py
::
return_value_policy
::
reference
)
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"find_var_recursive"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
FindVarRecursive
(
name
);
},
py
::
return_value_policy
::
reference
)
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"remove_var"
,
[](
BlockDesc
&
self
,
py
::
bytes
byte_name
)
{
[](
pd
::
BlockDesc
&
self
,
pybind11
::
bytes
byte_name
)
{
std
::
string
name
=
byte_name
;
return
self
.
RemoveVar
(
name
);
},
py
::
return_value_policy
::
reference
)
.
def
(
"all_vars"
,
&
BlockDesc
::
AllVars
,
py
::
return_value_policy
::
reference
)
.
def
(
"op_size"
,
&
BlockDesc
::
OpSize
)
.
def
(
"op"
,
&
BlockDesc
::
Op
,
py
::
return_value_policy
::
reference
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
BlockDesc
>
);
pybind11
::
return_value_policy
::
reference
)
.
def
(
"all_vars"
,
&
pd
::
BlockDesc
::
AllVars
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"op_size"
,
&
pd
::
BlockDesc
::
OpSize
)
.
def
(
"op"
,
&
pd
::
BlockDesc
::
Op
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
pd
::
BlockDesc
>
);
}
void
BindVarDsec
(
py
::
module
&
m
)
{
// NOLINT
py
::
class_
<
VarDesc
>
var_desc
(
m
,
"VarDesc"
,
""
);
void
BindVarDsec
(
py
bind11
::
module
*
m
)
{
py
bind11
::
class_
<
pd
::
VarDesc
>
var_desc
(
*
m
,
"VarDesc"
,
""
);
var_desc
.
def
(
"name"
,
[](
VarDesc
&
self
)
{
py
::
bytes
name
=
self
.
Name
();
[](
pd
::
VarDesc
&
self
)
{
py
bind11
::
bytes
name
=
self
.
Name
();
return
name
;
},
py
::
return_value_policy
::
reference
)
.
def
(
"set_name"
,
&
VarDesc
::
SetName
)
.
def
(
"set_shape"
,
&
VarDesc
::
SetShape
)
.
def
(
"set_shapes"
,
&
VarDesc
::
SetShapes
)
.
def
(
"set_dtype"
,
&
VarDesc
::
SetDataType
)
.
def
(
"set_dtypes"
,
&
VarDesc
::
SetDataTypes
)
.
def
(
"set_capacity"
,
&
VarDesc
::
SetCapacity
)
.
def
(
"shape"
,
&
VarDesc
::
GetShape
,
py
::
return_value_policy
::
reference
)
.
def
(
"shapes"
,
&
VarDesc
::
GetShapes
,
py
::
return_value_policy
::
reference
)
.
def
(
"dtype"
,
&
VarDesc
::
GetDataType
,
py
::
return_value_policy
::
reference
)
.
def
(
"dtypes"
,
&
VarDesc
::
GetDataTypes
,
py
::
return_value_policy
::
reference
)
.
def
(
"lod_level"
,
&
VarDesc
::
GetLoDLevel
)
.
def
(
"lod_levels"
,
&
VarDesc
::
GetLoDLevels
,
py
::
return_value_policy
::
reference
)
.
def
(
"set_lod_level"
,
&
VarDesc
::
SetLoDLevel
)
.
def
(
"set_lod_levels"
,
&
VarDesc
::
SetLoDLevels
)
.
def
(
"type"
,
&
VarDesc
::
GetType
)
.
def
(
"set_type"
,
&
VarDesc
::
SetType
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
VarDesc
>
)
.
def
(
"persistable"
,
&
VarDesc
::
Persistable
)
.
def
(
"set_persistable"
,
&
VarDesc
::
SetPersistable
);
pybind11
::
return_value_policy
::
reference
)
.
def
(
"set_name"
,
&
pd
::
VarDesc
::
SetName
)
.
def
(
"set_shape"
,
&
pd
::
VarDesc
::
SetShape
)
.
def
(
"set_shapes"
,
&
pd
::
VarDesc
::
SetShapes
)
.
def
(
"set_dtype"
,
&
pd
::
VarDesc
::
SetDataType
)
.
def
(
"set_dtypes"
,
&
pd
::
VarDesc
::
SetDataTypes
)
.
def
(
"set_capacity"
,
&
pd
::
VarDesc
::
SetCapacity
)
.
def
(
"shape"
,
&
pd
::
VarDesc
::
GetShape
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"shapes"
,
&
pd
::
VarDesc
::
GetShapes
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"dtype"
,
&
pd
::
VarDesc
::
GetDataType
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"dtypes"
,
&
pd
::
VarDesc
::
GetDataTypes
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"lod_level"
,
&
pd
::
VarDesc
::
GetLoDLevel
)
.
def
(
"lod_levels"
,
&
pd
::
VarDesc
::
GetLoDLevels
,
pybind11
::
return_value_policy
::
reference
)
.
def
(
"set_lod_level"
,
&
pd
::
VarDesc
::
SetLoDLevel
)
.
def
(
"set_lod_levels"
,
&
pd
::
VarDesc
::
SetLoDLevels
)
.
def
(
"type"
,
&
pd
::
VarDesc
::
GetType
)
.
def
(
"set_type"
,
&
pd
::
VarDesc
::
SetType
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
pd
::
VarDesc
>
)
.
def
(
"persistable"
,
&
pd
::
VarDesc
::
Persistable
)
.
def
(
"set_persistable"
,
&
pd
::
VarDesc
::
SetPersistable
);
py
::
enum_
<
proto
::
VarType
::
Type
>
(
var_desc
,
"VarType"
,
""
)
.
value
(
"BOOL"
,
proto
::
VarType
::
BOOL
)
.
value
(
"INT16"
,
proto
::
VarType
::
INT16
)
.
value
(
"INT32"
,
proto
::
VarType
::
INT32
)
.
value
(
"INT64"
,
proto
::
VarType
::
INT64
)
.
value
(
"FP16"
,
proto
::
VarType
::
FP16
)
.
value
(
"FP32"
,
proto
::
VarType
::
FP32
)
.
value
(
"FP64"
,
proto
::
VarType
::
FP64
)
.
value
(
"LOD_TENSOR"
,
proto
::
VarType
::
LOD_TENSOR
)
.
value
(
"SELECTED_ROWS"
,
proto
::
VarType
::
SELECTED_ROWS
)
.
value
(
"FEED_MINIBATCH"
,
proto
::
VarType
::
FEED_MINIBATCH
)
.
value
(
"FETCH_LIST"
,
proto
::
VarType
::
FETCH_LIST
)
.
value
(
"STEP_SCOPES"
,
proto
::
VarType
::
STEP_SCOPES
)
.
value
(
"LOD_RANK_TABLE"
,
proto
::
VarType
::
LOD_RANK_TABLE
)
.
value
(
"LOD_TENSOR_ARRAY"
,
proto
::
VarType
::
LOD_TENSOR_ARRAY
)
.
value
(
"CHANNEL"
,
proto
::
VarType
::
CHANNEL
)
.
value
(
"PLACE_LIST"
,
proto
::
VarType
::
PLACE_LIST
)
.
value
(
"READER"
,
proto
::
VarType
::
READER
)
.
value
(
"RAW"
,
proto
::
VarType
::
RAW
);
py
bind11
::
enum_
<
pd
::
proto
::
VarType
::
Type
>
(
var_desc
,
"VarType"
,
""
)
.
value
(
"BOOL"
,
p
d
::
p
roto
::
VarType
::
BOOL
)
.
value
(
"INT16"
,
p
d
::
p
roto
::
VarType
::
INT16
)
.
value
(
"INT32"
,
p
d
::
p
roto
::
VarType
::
INT32
)
.
value
(
"INT64"
,
p
d
::
p
roto
::
VarType
::
INT64
)
.
value
(
"FP16"
,
p
d
::
p
roto
::
VarType
::
FP16
)
.
value
(
"FP32"
,
p
d
::
p
roto
::
VarType
::
FP32
)
.
value
(
"FP64"
,
p
d
::
p
roto
::
VarType
::
FP64
)
.
value
(
"LOD_TENSOR"
,
p
d
::
p
roto
::
VarType
::
LOD_TENSOR
)
.
value
(
"SELECTED_ROWS"
,
p
d
::
p
roto
::
VarType
::
SELECTED_ROWS
)
.
value
(
"FEED_MINIBATCH"
,
p
d
::
p
roto
::
VarType
::
FEED_MINIBATCH
)
.
value
(
"FETCH_LIST"
,
p
d
::
p
roto
::
VarType
::
FETCH_LIST
)
.
value
(
"STEP_SCOPES"
,
p
d
::
p
roto
::
VarType
::
STEP_SCOPES
)
.
value
(
"LOD_RANK_TABLE"
,
p
d
::
p
roto
::
VarType
::
LOD_RANK_TABLE
)
.
value
(
"LOD_TENSOR_ARRAY"
,
p
d
::
p
roto
::
VarType
::
LOD_TENSOR_ARRAY
)
.
value
(
"CHANNEL"
,
p
d
::
p
roto
::
VarType
::
CHANNEL
)
.
value
(
"PLACE_LIST"
,
p
d
::
p
roto
::
VarType
::
PLACE_LIST
)
.
value
(
"READER"
,
p
d
::
p
roto
::
VarType
::
READER
)
.
value
(
"RAW"
,
p
d
::
p
roto
::
VarType
::
RAW
);
}
void
BindOpDesc
(
py
::
module
&
m
)
{
// NOLINT
py
::
enum_
<
proto
::
AttrType
>
(
m
,
"AttrType"
,
""
)
.
value
(
"INT"
,
proto
::
AttrType
::
INT
)
.
value
(
"INTS"
,
proto
::
AttrType
::
INTS
)
.
value
(
"FLOAT"
,
proto
::
AttrType
::
FLOAT
)
.
value
(
"FLOATS"
,
proto
::
AttrType
::
FLOATS
)
.
value
(
"STRING"
,
proto
::
AttrType
::
STRING
)
.
value
(
"STRINGS"
,
proto
::
AttrType
::
STRINGS
)
.
value
(
"BOOL"
,
proto
::
AttrType
::
BOOLEAN
)
.
value
(
"BOOLS"
,
proto
::
AttrType
::
BOOLEANS
)
.
value
(
"BLOCK"
,
proto
::
AttrType
::
BLOCK
);
void
BindOpDesc
(
py
bind11
::
module
*
m
)
{
py
bind11
::
enum_
<
pd
::
proto
::
AttrType
>
(
*
m
,
"AttrType"
,
""
)
.
value
(
"INT"
,
p
d
::
p
roto
::
AttrType
::
INT
)
.
value
(
"INTS"
,
p
d
::
p
roto
::
AttrType
::
INTS
)
.
value
(
"FLOAT"
,
p
d
::
p
roto
::
AttrType
::
FLOAT
)
.
value
(
"FLOATS"
,
p
d
::
p
roto
::
AttrType
::
FLOATS
)
.
value
(
"STRING"
,
p
d
::
p
roto
::
AttrType
::
STRING
)
.
value
(
"STRINGS"
,
p
d
::
p
roto
::
AttrType
::
STRINGS
)
.
value
(
"BOOL"
,
p
d
::
p
roto
::
AttrType
::
BOOLEAN
)
.
value
(
"BOOLS"
,
p
d
::
p
roto
::
AttrType
::
BOOLEANS
)
.
value
(
"BLOCK"
,
p
d
::
p
roto
::
AttrType
::
BLOCK
);
py
::
class_
<
OpDesc
>
op_desc
(
m
,
"OpDesc"
,
""
);
py
bind11
::
class_
<
pd
::
OpDesc
>
op_desc
(
*
m
,
"OpDesc"
,
""
);
op_desc
.
def
(
"__init__"
,
[](
OpDesc
&
self
)
{
new
(
&
self
)
OpDesc
();
},
py
::
return_value_policy
::
reference
)
.
def
(
"copy_from"
,
&
OpDesc
::
CopyFrom
)
.
def
(
"type"
,
&
OpDesc
::
Type
)
.
def
(
"set_type"
,
&
OpDesc
::
SetType
)
.
def
(
"input"
,
&
OpDesc
::
Input
)
.
def
(
"input_names"
,
&
OpDesc
::
InputNames
)
.
def
(
"output"
,
&
OpDesc
::
Output
)
.
def
(
"output_names"
,
&
OpDesc
::
OutputNames
)
.
def
(
"set_input"
,
&
OpDesc
::
SetInput
)
.
def
(
"set_output"
,
&
OpDesc
::
SetOutput
)
.
def
(
"input_arg_names"
,
&
OpDesc
::
InputArgumentNames
)
.
def
(
"output_arg_names"
,
&
OpDesc
::
OutputArgumentNames
)
.
def
(
"rename_input"
,
&
OpDesc
::
RenameInput
)
.
def
(
"rename_output"
,
&
OpDesc
::
RenameOutput
)
.
def
(
"has_attr"
,
&
OpDesc
::
HasAttr
)
.
def
(
"attr_type"
,
&
OpDesc
::
GetAttrType
)
.
def
(
"attr_names"
,
&
OpDesc
::
AttrNames
)
.
def
(
"set_attr"
,
&
OpDesc
::
SetAttr
)
.
def
(
"attr"
,
&
OpDesc
::
GetAttr
)
.
def
(
"set_block_attr"
,
&
OpDesc
::
SetBlockAttr
)
.
def
(
"__init__"
,
[](
pd
::
OpDesc
&
self
)
{
new
(
&
self
)
pd
::
OpDesc
();
},
py
bind11
::
return_value_policy
::
reference
)
.
def
(
"copy_from"
,
&
pd
::
OpDesc
::
CopyFrom
)
.
def
(
"type"
,
&
pd
::
OpDesc
::
Type
)
.
def
(
"set_type"
,
&
pd
::
OpDesc
::
SetType
)
.
def
(
"input"
,
&
pd
::
OpDesc
::
Input
)
.
def
(
"input_names"
,
&
pd
::
OpDesc
::
InputNames
)
.
def
(
"output"
,
&
pd
::
OpDesc
::
Output
)
.
def
(
"output_names"
,
&
pd
::
OpDesc
::
OutputNames
)
.
def
(
"set_input"
,
&
pd
::
OpDesc
::
SetInput
)
.
def
(
"set_output"
,
&
pd
::
OpDesc
::
SetOutput
)
.
def
(
"input_arg_names"
,
&
pd
::
OpDesc
::
InputArgumentNames
)
.
def
(
"output_arg_names"
,
&
pd
::
OpDesc
::
OutputArgumentNames
)
.
def
(
"rename_input"
,
&
pd
::
OpDesc
::
RenameInput
)
.
def
(
"rename_output"
,
&
pd
::
OpDesc
::
RenameOutput
)
.
def
(
"has_attr"
,
&
pd
::
OpDesc
::
HasAttr
)
.
def
(
"attr_type"
,
&
pd
::
OpDesc
::
GetAttrType
)
.
def
(
"attr_names"
,
&
pd
::
OpDesc
::
AttrNames
)
.
def
(
"set_attr"
,
&
pd
::
OpDesc
::
SetAttr
)
.
def
(
"attr"
,
&
pd
::
OpDesc
::
GetAttr
)
.
def
(
"set_block_attr"
,
&
pd
::
OpDesc
::
SetBlockAttr
)
.
def
(
"set_serialized_attr"
,
[](
OpDesc
&
self
,
const
std
::
string
&
name
,
const
py
::
bytes
&
seriralized
)
{
[](
pd
::
OpDesc
&
self
,
const
std
::
string
&
name
,
const
py
bind11
::
bytes
&
seriralized
)
{
std
::
string
ser
(
seriralized
);
self
.
SetAttr
(
name
,
ser
);
})
.
def
(
"block_attr"
,
&
OpDesc
::
GetBlockAttr
)
.
def
(
"check_attrs"
,
&
OpDesc
::
CheckAttrs
)
.
def
(
"infer_shape"
,
&
OpDesc
::
InferShape
)
.
def
(
"infer_var_type"
,
&
OpDesc
::
InferVarType
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
OpDesc
>
)
.
def
(
"block"
,
&
OpDesc
::
Block
,
py
::
return_value_policy
::
reference
);
.
def
(
"block_attr"
,
&
pd
::
OpDesc
::
GetBlockAttr
)
.
def
(
"check_attrs"
,
&
pd
::
OpDesc
::
CheckAttrs
)
.
def
(
"infer_shape"
,
&
pd
::
OpDesc
::
InferShape
)
.
def
(
"infer_var_type"
,
&
pd
::
OpDesc
::
InferVarType
)
.
def
(
"serialize_to_string"
,
SerializeMessage
<
pd
::
OpDesc
>
)
.
def
(
"block"
,
&
pd
::
OpDesc
::
Block
,
pybind11
::
return_value_policy
::
reference
);
}
}
// namespace pybind
...
...
paddle/fluid/pybind/protobuf.h
浏览文件 @
1543c4cf
...
...
@@ -11,25 +11,25 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <Python.h>
#include <fstream>
#include <vector>
#include "paddle/fluid/platform/variant.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace
py
=
pybind11
;
namespace
paddle
{
namespace
pybind
{
void
BindProgramDesc
(
py
::
module
&
m
);
void
BindBlockDesc
(
py
::
module
&
m
);
void
BindVarDsec
(
py
::
module
&
m
);
void
BindOpDesc
(
py
::
module
&
m
);
void
BindProgramDesc
(
pybind11
::
module
*
m
);
void
BindBlockDesc
(
pybind11
::
module
*
m
);
void
BindVarDsec
(
pybind11
::
module
*
m
);
void
BindOpDesc
(
pybind11
::
module
*
m
);
}
// namespace pybind
}
// namespace paddle
paddle/fluid/pybind/pybind.cc
浏览文件 @
1543c4cf
...
...
@@ -74,7 +74,7 @@ PYBIND11_PLUGIN(core) {
// not cause namespace pollution.
using
namespace
paddle
::
framework
;
// NOLINT
BindException
(
m
);
BindException
(
&
m
);
py
::
class_
<
Tensor
>
(
m
,
"Tensor"
,
py
::
buffer_protocol
())
.
def_buffer
(
...
...
@@ -478,11 +478,11 @@ All parameter, weight, gradient are variables in Paddle.
m
.
def
(
"set_feed_variable"
,
framework
::
SetFeedVariable
);
m
.
def
(
"get_fetch_variable"
,
framework
::
GetFetchVariable
);
BindProgramDesc
(
m
);
BindBlockDesc
(
m
);
BindVarDsec
(
m
);
BindOpDesc
(
m
);
BindConstValue
(
m
);
BindProgramDesc
(
&
m
);
BindBlockDesc
(
&
m
);
BindVarDsec
(
&
m
);
BindOpDesc
(
&
m
);
BindConstValue
(
&
m
);
py
::
class_
<
framework
::
LoDRankTable
>
(
m
,
"LodRankTable"
)
.
def
(
"items"
,
[](
framework
::
LoDRankTable
&
table
)
{
...
...
@@ -553,7 +553,7 @@ All parameter, weight, gradient are variables in Paddle.
})
.
def
(
"run"
,
&
ParallelExecutor
::
Run
);
BindRecordIOWriter
(
m
);
BindRecordIOWriter
(
&
m
);
return
m
.
ptr
();
}
}
// namespace pybind
...
...
paddle/fluid/pybind/recordio.cc
浏览文件 @
1543c4cf
...
...
@@ -13,13 +13,19 @@
// limitations under the License.
#include "paddle/fluid/pybind/recordio.h"
#include <fstream>
#include <string>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/recordio/writer.h"
namespace
paddle
{
namespace
pybind
{
namespace
{
class
RecordIOWriter
{
public:
RecordIOWriter
(
const
std
::
string
&
filename
,
recordio
::
Compressor
compressor
,
...
...
@@ -49,8 +55,10 @@ class RecordIOWriter {
recordio
::
Writer
writer_
;
};
void
BindRecordIOWriter
(
py
::
module
&
m
)
{
py
::
class_
<
RecordIOWriter
>
writer
(
m
,
"RecordIOWriter"
,
""
);
}
// namespace
void
BindRecordIOWriter
(
py
::
module
*
m
)
{
py
::
class_
<
RecordIOWriter
>
writer
(
*
m
,
"RecordIOWriter"
,
""
);
py
::
enum_
<
recordio
::
Compressor
>
(
writer
,
"Compressor"
,
""
)
.
value
(
"Snappy"
,
recordio
::
Compressor
::
kSnappy
)
.
value
(
"NoCompress"
,
recordio
::
Compressor
::
kNoCompress
);
...
...
paddle/fluid/pybind/recordio.h
浏览文件 @
1543c4cf
...
...
@@ -21,6 +21,7 @@ namespace py = pybind11;
namespace
paddle
{
namespace
pybind
{
extern
void
BindRecordIOWriter
(
py
::
module
&
m
);
void
BindRecordIOWriter
(
py
::
module
*
m
);
}
// namespace pybind
}
// namespace paddle
paddle/fluid/pybind/tensor_py.h
浏览文件 @
1543c4cf
...
...
@@ -23,12 +23,8 @@ limitations under the License. */
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
namespace
py
=
pybind11
;
namespace
paddle
{
namespace
pybind
{
namespace
details
{
template
<
bool
less
,
size_t
I
,
typename
...
ARGS
>
...
...
@@ -36,16 +32,16 @@ struct CastToPyBufferImpl;
template
<
size_t
I
,
typename
...
ARGS
>
struct
CastToPyBufferImpl
<
false
,
I
,
ARGS
...
>
{
py
::
buffer_info
operator
()(
framework
::
Tensor
&
tensor
)
{
py
bind11
::
buffer_info
operator
()(
const
framework
::
Tensor
&
tensor
)
{
PADDLE_THROW
(
"This type of tensor cannot be expose to Python"
);
return
py
::
buffer_info
();
return
py
bind11
::
buffer_info
();
}
};
template
<
size_t
I
,
typename
...
ARGS
>
struct
CastToPyBufferImpl
<
true
,
I
,
ARGS
...
>
{
using
CUR_TYPE
=
typename
std
::
tuple_element
<
I
,
std
::
tuple
<
ARGS
...
>>::
type
;
py
::
buffer_info
operator
()(
framework
::
Tensor
&
tensor
)
{
py
bind11
::
buffer_info
operator
()(
const
framework
::
Tensor
&
tensor
)
{
if
(
std
::
type_index
(
typeid
(
CUR_TYPE
))
==
tensor
.
type
())
{
auto
dim_vec
=
framework
::
vectorize
(
tensor
.
dims
());
std
::
vector
<
size_t
>
dims_outside
;
...
...
@@ -84,15 +80,15 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
if
(
std
::
type_index
(
typeid
(
CUR_TYPE
))
==
std
::
type_index
(
typeid
(
platform
::
float16
)))
{
return
py
::
buffer_info
(
dst_tensor
.
data
<
CUR_TYPE
>
(),
sizeof
(
CUR_TYPE
),
"e"
,
/* np.dtype('e') == np.float16 */
(
size_t
)
framework
::
arity
(
dst_tensor
.
dims
()),
dims_outside
,
strides
);
return
py
bind11
::
buffer_info
(
dst_tensor
.
data
<
CUR_TYPE
>
(),
sizeof
(
CUR_TYPE
),
"e"
,
/* np.dtype('e') == np.float16 */
(
size_t
)
framework
::
arity
(
dst_tensor
.
dims
()),
dims_outside
,
strides
);
}
else
{
return
py
::
buffer_info
(
dst_tensor
.
data
<
CUR_TYPE
>
(),
sizeof
(
CUR_TYPE
),
py
::
format_descriptor
<
CUR_TYPE
>::
format
(
),
(
size_t
)
framework
::
arity
(
dst_tensor
.
dims
()
),
dims_outside
,
strides
);
return
py
bind11
::
buffer_info
(
dst_tensor
.
data
<
CUR_TYPE
>
(),
sizeof
(
CUR_TYPE
),
pybind11
::
format_descriptor
<
CUR_TYPE
>::
format
(
),
(
size_t
)
framework
::
arity
(
dst_tensor
.
dims
()),
dims_outside
,
strides
);
}
}
else
{
constexpr
bool
less
=
I
+
1
<
std
::
tuple_size
<
std
::
tuple
<
ARGS
...
>>::
value
;
...
...
@@ -103,7 +99,7 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
}
// namespace details
inline
py
::
buffer_info
CastToPyBuffer
(
framework
::
Tensor
&
tensor
)
{
inline
py
bind11
::
buffer_info
CastToPyBuffer
(
const
framework
::
Tensor
&
tensor
)
{
auto
buffer_info
=
details
::
CastToPyBufferImpl
<
true
,
0
,
float
,
int
,
double
,
int64_t
,
bool
,
platform
::
float16
>
()(
tensor
);
...
...
@@ -111,7 +107,7 @@ inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
}
template
<
typename
T
>
T
TensorGetElement
(
framework
::
Tensor
&
self
,
size_t
offset
)
{
T
TensorGetElement
(
const
framework
::
Tensor
&
self
,
size_t
offset
)
{
if
(
platform
::
is_cpu_place
(
self
.
place
()))
{
return
self
.
data
<
T
>
()[
offset
];
}
else
{
...
...
@@ -123,31 +119,32 @@ T TensorGetElement(framework::Tensor &self, size_t offset) {
// TODO(dzhwinter) : fix the redundent Tensor allocate and free
template
<
typename
T
>
void
TensorSetElement
(
framework
::
Tensor
&
self
,
size_t
offset
,
T
elem
)
{
if
(
platform
::
is_gpu_place
(
self
.
place
()))
{
void
TensorSetElement
(
framework
::
Tensor
*
self
,
size_t
offset
,
T
elem
)
{
if
(
platform
::
is_gpu_place
(
self
->
place
()))
{
std
::
shared_ptr
<
framework
::
Tensor
>
dst
(
new
framework
::
Tensor
);
framework
::
TensorCopy
(
self
,
platform
::
CPUPlace
(),
dst
.
get
());
framework
::
TensorCopy
(
*
self
,
platform
::
CPUPlace
(),
dst
.
get
());
dst
->
data
<
T
>
()[
offset
]
=
elem
;
framework
::
TensorCopy
(
*
dst
.
get
(),
self
.
place
(),
&
self
);
framework
::
TensorCopy
(
*
dst
.
get
(),
self
->
place
(),
self
);
}
else
if
(
platform
::
is_cpu_place
(
self
.
place
()))
{
self
.
data
<
T
>
()[
offset
]
=
elem
;
}
else
if
(
platform
::
is_cpu_place
(
self
->
place
()))
{
self
->
data
<
T
>
()[
offset
]
=
elem
;
}
}
template
<
typename
T
>
void
PyCPUTensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
T
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CPUPlace
&
place
)
{
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
T
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CPUPlace
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
for
(
size_t
i
=
0
;
i
<
array
.
ndim
();
++
i
)
{
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
T
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
T
>
(
place
);
std
::
memcpy
(
dst
,
array
.
data
(),
sizeof
(
T
)
*
array
.
size
());
}
...
...
@@ -155,34 +152,37 @@ template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16.
void
PyCPUTensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
uint16_t
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CPUPlace
&
place
)
{
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
uint16_t
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CPUPlace
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
for
(
size_t
i
=
0
;
i
<
array
.
ndim
();
++
i
)
{
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
platform
::
float16
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
platform
::
float16
>
(
place
);
std
::
memcpy
(
dst
,
array
.
data
(),
sizeof
(
uint16_t
)
*
array
.
size
());
}
#ifdef PADDLE_WITH_CUDA
template
<
typename
T
>
void
PyCUDATensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
T
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CUDAPlace
&
place
)
{
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
T
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CUDAPlace
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
for
(
size_t
i
=
0
;
i
<
array
.
ndim
();
++
i
)
{
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
T
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
T
>
(
place
);
platform
::
DeviceContextPool
&
pool
=
platform
::
DeviceContextPool
::
Instance
();
auto
dev_ctx
=
...
...
@@ -195,17 +195,19 @@ template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16.
void
PyCUDATensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
uint16_t
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CUDAPlace
&
place
)
{
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
uint16_t
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
paddle
::
platform
::
CUDAPlace
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
for
(
size_t
i
=
0
;
i
<
array
.
ndim
();
++
i
)
{
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
platform
::
float16
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
platform
::
float16
>
(
place
);
platform
::
DeviceContextPool
&
pool
=
platform
::
DeviceContextPool
::
Instance
();
auto
dev_ctx
=
...
...
@@ -217,8 +219,9 @@ void PyCUDATensorSetFromArray(
template
<
typename
T
>
void
PyCUDAPinnedTensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
T
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
T
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
const
paddle
::
platform
::
CUDAPinnedPlace
&
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
...
...
@@ -226,8 +229,8 @@ void PyCUDAPinnedTensorSetFromArray(
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
T
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
T
>
(
place
);
std
::
memcpy
(
dst
,
array
.
data
(),
sizeof
(
T
)
*
array
.
size
());
}
...
...
@@ -235,8 +238,10 @@ template <>
// This following specialization maps uint16_t in the parameter type to
// platform::float16.
void
PyCUDAPinnedTensorSetFromArray
(
framework
::
Tensor
&
self
,
py
::
array_t
<
uint16_t
,
py
::
array
::
c_style
|
py
::
array
::
forcecast
>
array
,
framework
::
Tensor
*
self
,
pybind11
::
array_t
<
uint16_t
,
pybind11
::
array
::
c_style
|
pybind11
::
array
::
forcecast
>
array
,
const
paddle
::
platform
::
CUDAPinnedPlace
&
place
)
{
std
::
vector
<
int64_t
>
dims
;
dims
.
reserve
(
array
.
ndim
());
...
...
@@ -244,8 +249,8 @@ void PyCUDAPinnedTensorSetFromArray(
dims
.
push_back
(
static_cast
<
int
>
(
array
.
shape
()[
i
]));
}
self
.
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
.
mutable_data
<
platform
::
float16
>
(
place
);
self
->
Resize
(
framework
::
make_ddim
(
dims
));
auto
*
dst
=
self
->
mutable_data
<
platform
::
float16
>
(
place
);
std
::
memcpy
(
dst
,
array
.
data
(),
sizeof
(
uint16_t
)
*
array
.
size
());
}
#endif
...
...
paddle/fluid/pybind/tensor_py_test.cc
0 → 100644
浏览文件 @
1543c4cf
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/pybind/tensor_py.h"
#include <iostream>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor.h"
// Verifies that CastToPyBuffer reports the correct element size, total
// element count, shape, and C-style (row-major) byte strides for a
// small 1x2x3 int tensor on the CPU.
TEST(TensorPy, CastToPyBufferImpl) {
  using ElemType = int;

  paddle::framework::Tensor tensor;
  const auto dims = paddle::framework::make_ddim({1, 2, 3});
  ElemType *buf =
      tensor.mutable_data<ElemType>(dims, paddle::platform::CPUPlace());
  const int total = paddle::framework::product(dims);
  for (int idx = 0; idx < total; ++idx) {
    buf[idx] = idx;
  }

  pybind11::buffer_info info = paddle::pybind::CastToPyBuffer(tensor);

  EXPECT_EQ(info.itemsize, static_cast<size_t>(sizeof(ElemType)));
  EXPECT_EQ(info.size, static_cast<size_t>(total));
  EXPECT_EQ(info.ndim, static_cast<size_t>(3));  // rank matches dims

  // Shape mirrors the ddim {1, 2, 3}.
  EXPECT_EQ(info.shape.size(), 3U);
  EXPECT_EQ(info.shape[0], static_cast<size_t>(1));
  EXPECT_EQ(info.shape[1], static_cast<size_t>(2));
  EXPECT_EQ(info.shape[2], static_cast<size_t>(3));

  // Row-major strides, in bytes: innermost stride is one element,
  // each outer stride multiplies by the inner extent.
  EXPECT_EQ(info.strides.size(), 3U);
  EXPECT_EQ(info.strides[2], static_cast<size_t>(sizeof(ElemType)));
  EXPECT_EQ(info.strides[1], static_cast<size_t>(sizeof(ElemType) * 3));
  EXPECT_EQ(info.strides[0], static_cast<size_t>(sizeof(ElemType) * 2 * 3));
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录