PaddlePaddle / Paddle
Commit 4f9d6529 (unverified)
Authored by Chen Weihang on Sep 16, 2020; committed via GitHub on Sep 16, 2020
Polish framework error message part 7 (#27266)
* polish framework error message part 7
* fix typo
* polish by reviewers' comments
Parent: 950301bf
Showing 8 changed files with 167 additions and 99 deletions:
paddle/fluid/framework/reader.cc           +4   -1
paddle/fluid/framework/rw_lock.h           +8   -4
paddle/fluid/framework/save_load_util.cc   +67  -53
paddle/fluid/framework/selected_rows.cc    +23  -13
paddle/fluid/framework/selected_rows.h     +2   -1
paddle/fluid/framework/shape_inference.cc  +10  -8
paddle/fluid/framework/tensor_util.cc      +48  -18
paddle/fluid/framework/tensor_util.h       +5   -1
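Every hunk below follows the same pattern: a bare PADDLE_ENFORCE/PADDLE_THROW message string is replaced by a typed error from platform::errors (Unavailable, InvalidArgument, NotFound, PreconditionNotMet, External, Unimplemented) carrying a complete, formatted sentence. As a rough illustration of that shape, here is a minimal, self-contained sketch; the SKETCH_ENFORCE_EQ macro, the sketch namespace, and the EnforceNotMet type are hypothetical stand-ins, not Paddle's actual PADDLE_ENFORCE_EQ or platform::errors implementation.

// Minimal sketch of "typed error + full sentence" enforcement (hypothetical).
#include <cstdio>
#include <stdexcept>
#include <string>

namespace sketch {

// Hypothetical stand-in for platform::errors::*: an exception tagged with a category.
struct EnforceNotMet : std::runtime_error {
  EnforceNotMet(const std::string& type, const std::string& msg)
      : std::runtime_error("[" + type + "] " + msg) {}
};

template <typename... Args>
std::string Format(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return buf;
}

}  // namespace sketch

// Hypothetical macro mirroring the shape of PADDLE_ENFORCE_EQ(a, b, error).
#define SKETCH_ENFORCE_EQ(a, b, type, ...)                            \
  do {                                                                \
    if (!((a) == (b))) {                                              \
      throw sketch::EnforceNotMet(type, sketch::Format(__VA_ARGS__)); \
    }                                                                 \
  } while (0)

int main() {
  int version = 1;
  try {
    // Old style: ENFORCE_EQ(version, 0, "Only version 0 is supported");
    // New style: name the error category and state the full condition.
    SKETCH_ENFORCE_EQ(version, 0, "InvalidArgument",
                      "Only version 0 tensor is supported, but received %d.",
                      version);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}

The intent is the same as in the hunks that follow: the error category is machine-readable, and the message states what was expected, what was actually found, and what the user can do about it.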

paddle/fluid/framework/reader.cc
@@ -20,7 +20,10 @@ namespace framework {
 void ReaderBase::ReadNext(std::vector<LoDTensor>* out) {
   std::lock_guard<std::mutex> lock(mu_);
-  PADDLE_ENFORCE_EQ(status_, ReaderStatus::kRunning);
+  PADDLE_ENFORCE_EQ(status_, ReaderStatus::kRunning,
+                    platform::errors::Unavailable(
+                        "The current reader has stopped running and cannot "
+                        "continue to read the next batch of data."));
   ReadNextImpl(out);
 }

paddle/fluid/framework/rw_lock.h
@@ -32,17 +32,21 @@ struct RWLock {
   ~RWLock() { pthread_rwlock_destroy(&lock_); }

   inline void RDLock() {
-    PADDLE_ENFORCE_EQ(pthread_rwlock_rdlock(&lock_), 0,
-                      "acquire read lock failed");
+    PADDLE_ENFORCE_EQ(
+        pthread_rwlock_rdlock(&lock_), 0,
+        platform::errors::External("The pthread failed to acquire read lock."));
   }

   inline void WRLock() {
     PADDLE_ENFORCE_EQ(pthread_rwlock_wrlock(&lock_), 0,
-                      "acquire write lock failed");
+                      platform::errors::External(
+                          "The pthread failed to acquire write lock."));
   }

   inline void UNLock() {
-    PADDLE_ENFORCE_EQ(pthread_rwlock_unlock(&lock_), 0, "unlock failed");
+    PADDLE_ENFORCE_EQ(
+        pthread_rwlock_unlock(&lock_), 0,
+        platform::errors::External("The pthread failed to unlock."));
   }

  private:
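The rw_lock.h change keeps the original behavior (every pthread_rwlock_* return code is checked) and only upgrades the message to a platform::errors::External error, signalling that the failure came from the C library rather than from Paddle itself. Below is a standalone sketch of the same check using plain POSIX pthreads and a std::runtime_error instead of Paddle's macros; the ScopedReadLock class is hypothetical.

// Standalone sketch: verify pthread_rwlock_* return codes (not Paddle's RWLock).
#include <pthread.h>
#include <cstdio>
#include <stdexcept>
#include <string>

class ScopedReadLock {
 public:
  explicit ScopedReadLock(pthread_rwlock_t* lock) : lock_(lock) {
    int rc = pthread_rwlock_rdlock(lock_);
    if (rc != 0) {
      // Mirrors "The pthread failed to acquire read lock.", plus the error code.
      throw std::runtime_error("pthread_rwlock_rdlock failed, code " +
                               std::to_string(rc));
    }
  }
  ~ScopedReadLock() { pthread_rwlock_unlock(lock_); }

 private:
  pthread_rwlock_t* lock_;
};

int main() {
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  {
    ScopedReadLock guard(&lock);  // throws if the lock cannot be acquired
    std::printf("read lock held\n");
  }
  pthread_rwlock_destroy(&lock);
  return 0;
}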

paddle/fluid/framework/save_load_util.cc
@@ -33,7 +33,8 @@ void CheckInStreamState(std::istream& istre, size_t length) {
     VLOG(5) << "Can't read [" << length << "] from file"
             << "file seems breakem";
-    PADDLE_THROW("Model load error, file seems breaken");
+    PADDLE_THROW(
+        platform::errors::Unavailable("Model load failed, istream state error."));
   }
 }
@@ -58,10 +59,11 @@ size_t ReadTensorNumber(std::istream& istre) {
              sizeof(char) * tensor_number_mark.size());
   std::string str_read_tensor_number_mark(tensor_number_mark_buffer,
                                           tensor_number_mark.size());
-  PADDLE_ENFORCE_EQ(
-      tensor_number_mark, str_read_tensor_number_mark,
-      "Tensor number mark not match, expect [%s], but read from file is [%]",
-      tensor_number_mark, str_read_tensor_number_mark);
+  PADDLE_ENFORCE_EQ(
+      tensor_number_mark, str_read_tensor_number_mark,
+      platform::errors::InvalidArgument(
+          "Tensor number mark does not match, expect mark is "
+          "[%s], but the mark read from file is [%s].",
+          tensor_number_mark, str_read_tensor_number_mark));
   size_t tensor_number = 0;
   istre.read(reinterpret_cast<char*>(&tensor_number), sizeof(tensor_number));
@@ -79,10 +81,11 @@ std::string ReadTensorName(std::istream& istre) {
   std::string str_read_tensor_name_mark(name_mark_buffer,
                                         tensor_name_mark.size());
-  PADDLE_ENFORCE_EQ(
-      tensor_name_mark, str_read_tensor_name_mark,
-      "Tensor name mark not match, expect [%s], but read from file is [%]",
-      tensor_name_mark, str_read_tensor_name_mark);
+  PADDLE_ENFORCE_EQ(
+      tensor_name_mark, str_read_tensor_name_mark,
+      platform::errors::InvalidArgument(
+          "Tensor name mark does not match, expect mark is [%s], "
+          "but the mark read from file is [%s].",
+          tensor_name_mark, str_read_tensor_name_mark));
   size_t tensor_name_length = 0;
   istre.read(reinterpret_cast<char*>(&tensor_name_length),
@@ -117,16 +120,18 @@ bool SaveStaticNameListToDisk(
   for (size_t i = 0; i < vec_tensor_name_list.size(); ++i) {
     auto var_ptr = scope.FindVar(vec_tensor_name_list[i]);
-    PADDLE_ENFORCE_NE(
-        var_ptr, nullptr,
-        "Variable find error, when save model, can't not find vairable [%s], "
-        "Please make sure you have run StartUpProgram",
-        vec_tensor_name_list[i]);
+    PADDLE_ENFORCE_NOT_NULL(
+        var_ptr, platform::errors::NotFound("Variable (%s) is not found when "
+                                            "saving model, please make sure "
+                                            "that exe.run(startup_program) has "
+                                            "been executed.",
+                                            vec_tensor_name_list[i]));
     Tensor* tensor = var_ptr->GetMutable<LoDTensor>();
-    PADDLE_ENFORCE_EQ(tensor->IsInitialized(), true,
-                      "Paramter [%s] not initialzed,"
-                      "Please make sure you have run StartUpProgram",
-                      vec_tensor_name_list[i]);
+    PADDLE_ENFORCE_EQ(
+        tensor->IsInitialized(), true,
+        platform::errors::PreconditionNotMet(
+            "Paramter [%s] is not initialzed, please make sure "
+            "that exe.run(startup_program) has been executed.",
+            vec_tensor_name_list[i]));
     map_tensor[vec_tensor_name_list[i]] = tensor;
   }
@@ -145,9 +150,10 @@ bool SaveDygraphVarBaseListToDisk(
     Tensor* tensor = var_ptr->GetMutable<LoDTensor>();
-    PADDLE_ENFORCE_EQ(tensor->IsInitialized(), true,
-                      "Paramter [%s] not initialzed,"
-                      "Please make sure you have run StartUpProgram",
-                      vec_var_base_list[i]->Name());
+    PADDLE_ENFORCE_EQ(
+        tensor->IsInitialized(), true,
+        platform::errors::PreconditionNotMet(
+            "Paramter [%s] is not initialzed, please make sure "
+            "that exe.run(startup_program) has been executed.",
+            vec_var_base_list[i]->Name()));
     map_tensor[vec_var_base_list[i]->Name()] = tensor;
   }
@@ -185,34 +191,41 @@ bool LoadStaticNameListFromDisk(
   for (size_t i = 0; i < vec_tensor_name_list.size(); ++i) {
     auto it = map_load_tensor.find(vec_tensor_name_list[i]);
-    PADDLE_ENFORCE(it != map_load_tensor.end(),
-                   "Paramete not found in Model file, "
-                   "Can not find [%s] in model file [%s]",
-                   vec_tensor_name_list[i], file_name);
+    PADDLE_ENFORCE_NE(it, map_load_tensor.end(),
+                      platform::errors::NotFound(
+                          "Parameter (%s) not found in model file (%s).",
+                          vec_tensor_name_list[i], file_name));
     auto var_ptr = scope.FindVar(vec_tensor_name_list[i]);
-    PADDLE_ENFORCE_NE(
-        var_ptr, nullptr,
-        "Parameter not created, when load model, can't not find parameter [%s] "
-        "please make sure you have run StartUpProgram",
-        vec_tensor_name_list[i]);
+    PADDLE_ENFORCE_NOT_NULL(
+        var_ptr,
+        platform::errors::PreconditionNotMet(
+            "Parameter (%s) is not created when loading model, "
+            "please make sure that exe.run(startup_program) has been executed.",
+            vec_tensor_name_list[i]));
     Tensor* tensor = var_ptr->GetMutable<LoDTensor>();
-    PADDLE_ENFORCE_NE(tensor, nullptr,
-                      "Paramter [%s] not initialzed "
-                      "please make sure you have run startUpProgram",
-                      vec_tensor_name_list[i]);
+    PADDLE_ENFORCE_NOT_NULL(
+        tensor,
+        platform::errors::PreconditionNotMet(
+            "Paramter [%s] is not initialzed, "
+            "please make sure that exe.run(startup_program) has been executed.",
+            vec_tensor_name_list[i]));
-    PADDLE_ENFORCE_EQ(tensor->IsInitialized(), true,
-                      "Paramter [%s] not initialzed "
-                      "please make sure you have run StartUpProgram",
-                      vec_tensor_name_list[i]);
+    PADDLE_ENFORCE_EQ(
+        tensor->IsInitialized(), true,
+        platform::errors::PreconditionNotMet(
+            "Paramter [%s] is not initialzed, "
+            "please make sure that exe.run(startup_program) has "
+            "been executed.v",
+            vec_tensor_name_list[i]));
-    PADDLE_ENFORCE_EQ(
-        tensor->dims(), it->second->dims(),
-        "Shape not matching: the Program requires a parameter with a shape of "
-        "(%s), "
-        "while the loaded parameter (namely [ %s ]) has a shape of (%s).",
-        tensor->dims(), vec_tensor_name_list[i], it->second->dims());
+    PADDLE_ENFORCE_EQ(
+        tensor->dims(), it->second->dims(),
+        platform::errors::InvalidArgument(
+            "Shape does not match, the program requires a parameter with a "
+            "shape of "
+            "(%s), while the loaded parameter (namely [ %s ]) has a shape of "
+            "(%s).",
+            tensor->dims(), vec_tensor_name_list[i], it->second->dims()));
     TensorCopySync(*(it->second.get()), tensor->place(), tensor);
@@ -239,9 +252,9 @@ bool SaveTensorToDisk(const std::string& file_name,
   MkDirRecursively(DirName(file_name).c_str());
   std::ofstream fout(file_name, std::ios::binary);
-  if (!fout) {
-    PADDLE_THROW("File open error. Can not open file [%s]", file_name);
-  }
+  PADDLE_ENFORCE_EQ(
+      fout.is_open(), true,
+      platform::errors::Unavailable("File (%s) open failed.", file_name));
   // first 256 byte for reserve for fulture upgrade
   char* kReserveBuffer = new char[model_file_reserve_size];
@@ -292,9 +305,8 @@ bool SaveTensorToDisk(const std::string& file_name,
       TensorCopySync(*tensor, platform::CPUPlace(), &temp);
       data_ptr = temp.data<void>();
 #else
-      PADDLE_THROW(
-          "Tensor is in CUDA device, but paddle not compile with CUDA, this "
-          "should not happen");
+      PADDLE_THROW(platform::errors::Unavailable(
+          "Tensor is in CUDA device, but paddle not compiled with CUDA."));
 #endif
     }
     fout.write(static_cast<const char*>(data_ptr),
@@ -302,8 +314,9 @@ bool SaveTensorToDisk(const std::string& file_name,
   }
   if (!fout) {
-    PADDLE_THROW("Model save failed, data write to model file [%s] error",
-                 file_name);
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Model save failed, error when writing data into model file [%s].",
+        file_name));
   }
   fout.close();
@@ -316,9 +329,9 @@ bool LoadTensorFromDisk(
     std::map<std::string, std::shared_ptr<Tensor>>* map_tensor) {
   std::ifstream fin(file_name, std::ios::binary);
-  if (!fin) {
-    PADDLE_THROW("File open error. Can not open model file [%s]", file_name);
-  }
+  PADDLE_ENFORCE_EQ(
+      fin.is_open(), true,
+      platform::errors::Unavailable("File (%s) open failed.", file_name));
   ReadReserveBuffer(fin);
@@ -331,7 +344,8 @@ bool LoadTensorFromDisk(
     uint32_t version;
     fin.read(reinterpret_cast<char*>(&version), sizeof(version));
     CheckInStreamState(fin, sizeof(version));
-    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
+    PADDLE_ENFORCE_EQ(
+        version, 0U,
+        platform::errors::InvalidArgument("Only version 0 tensor is supported."));
     proto::VarType::TensorDesc desc;
     {  // int32_t size
@@ -344,7 +358,7 @@ bool LoadTensorFromDisk(
       CheckInStreamState(fin, sizeof(size));
       PADDLE_ENFORCE_EQ(
           desc.ParseFromArray(buf.get(), size), true,
-          platform::errors::InvalidArgument("Cannot parse tensor desc"));
+          platform::errors::InvalidArgument("Parse tensor desc failed."));
     }
     {  // read tensor
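Most of the save_load_util.cc hunks wrap two kinds of I/O checks: whether the file stream opened at all (now via fout.is_open()/fin.is_open() under PADDLE_ENFORCE_EQ) and whether the stream is still healthy after a read (CheckInStreamState). Here is a compact sketch of both checks with the standard library only, assuming a hypothetical model.bin file and a simplified CheckStreamState helper that stands in for Paddle's version.

// Sketch of "file opened" and "stream still good after read" checks (not Paddle's code).
#include <cstdint>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>

void CheckStreamState(std::istream& in, size_t length) {
  if (!in) {
    throw std::runtime_error("Model load failed: could not read " +
                             std::to_string(length) +
                             " bytes, istream state error.");
  }
}

int main() {
  const std::string file_name = "model.bin";  // hypothetical file name
  std::ifstream fin(file_name, std::ios::binary);
  if (!fin.is_open()) {
    std::cerr << "File (" << file_name << ") open failed.\n";
    return 1;
  }
  uint32_t version = 0;
  fin.read(reinterpret_cast<char*>(&version), sizeof(version));
  CheckStreamState(fin, sizeof(version));  // throws if the read was short
  if (version != 0U) {
    throw std::runtime_error("Only version 0 tensor is supported.");
  }
  std::cout << "version = " << version << "\n";
  return 0;
}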

paddle/fluid/framework/selected_rows.cc
@@ -113,7 +113,9 @@ void DeserializeFromStream(std::istream& is, SelectedRows* selected_rows,
     // the 1st field, unit32_t version for SelectedRows
     uint32_t version;
     is.read(reinterpret_cast<char*>(&version), sizeof(version));
-    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
+    PADDLE_ENFORCE_EQ(version, 0U,
+                      platform::errors::InvalidArgument(
+                          "Only version 0 SelectedRows is supported."));
   }
   {
     // the 2st field, rows information
@@ -155,24 +157,27 @@ int64_t SelectedRows::AutoGrownIndex(int64_t key, bool auto_grown,
   auto iter = id_to_index_.find(key);
   if (iter == id_to_index_.end()) {
     rwlock_->UNLock();
-    if (!auto_grown) {
-      PADDLE_THROW("key %d not found", key);
-    }
+    PADDLE_ENFORCE_EQ(
+        auto_grown, true,
+        platform::errors::NotFound("Input key(%lld) is not found.", key));
     rwlock_->WRLock();
     auto map_size = id_to_index_.size();
     auto vector_size = rows_.size();
     if (map_size != vector_size) {
       rwlock_->UNLock();
-      PADDLE_THROW(
-          "id_to_index_ size %d should have the same size with rows_ %d",
-          map_size, vector_size);
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Row map size(%zu) should be equal to rows size(%zu).", map_size,
+          vector_size));
     }
     auto write_iter = id_to_index_.find(key);
     if (write_iter == id_to_index_.end()) {
       int row_num = rows_.size();
       if (row_num == value_->dims()[0]) {
         rwlock_->UNLock();
-        PADDLE_THROW("selected rows is full, then length exceed %d", row_num);
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Selected rows is full, then length exceed the length of first "
+            "dimension (%d).",
+            row_num));
       }
       // key logic to put a key into id_to_index_
       rows_.push_back(key);
@@ -203,15 +208,20 @@ void SelectedRows::SyncIndex() {
 void SelectedRows::Get(const framework::Tensor& ids, framework::Tensor* value,
                        bool auto_grown, bool is_test) {
-  PADDLE_ENFORCE(value->IsInitialized(),
-                 "The value tensor should be initialized.");
+  PADDLE_ENFORCE_EQ(value->IsInitialized(), true,
+                    platform::errors::InvalidArgument(
+                        "The value tensor is not initialized."));
   if (ids.numel() == 0) {
     VLOG(3) << "keys is empty, please check data!";
   } else {
     int64_t value_width = value_->numel() / value_->dims()[0];
-    PADDLE_ENFORCE_EQ(value_width, value->numel() / value->dims()[0],
-                      "output tensor should have the same shape with table "
-                      "except the dims[0].");
+    PADDLE_ENFORCE_EQ(
+        value_width, value->numel() / value->dims()[0],
+        platform::errors::InvalidArgument(
+            "Output tensor should have the same shape with table "
+            "except the first dimmension, excepted value width not counting "
+            "the first dimension is %d, actual value width is %d.",
+            value_width, value->numel() / value->dims()[0]));
     for (int i = 0; i < ids.numel(); ++i) {
       auto id = ids.data<int64_t>()[i];
      int64_t index = AutoGrownIndex(id, auto_grown, is_test);
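One detail worth noting in AutoGrownIndex: the sizes being compared come from id_to_index_.size() and rows_.size(), so the new message formats them with %zu, and the missing-key error now prints the int64_t key with %lld instead of the old %d. A standalone sketch of the map/vector consistency check follows; CheckRowMapConsistency is a hypothetical helper, not part of SelectedRows.

// Sketch: the id->index map and the rows vector must stay the same size.
#include <cstdio>
#include <stdexcept>
#include <unordered_map>
#include <vector>

void CheckRowMapConsistency(
    const std::unordered_map<long long, long long>& id_to_index,
    const std::vector<long long>& rows) {
  size_t map_size = id_to_index.size();
  size_t vector_size = rows.size();
  if (map_size != vector_size) {
    char msg[128];
    // %zu matches size_t, mirroring the message in the hunk above.
    std::snprintf(msg, sizeof(msg),
                  "Row map size(%zu) should be equal to rows size(%zu).",
                  map_size, vector_size);
    throw std::logic_error(msg);
  }
}

int main() {
  std::unordered_map<long long, long long> id_to_index = {{7, 0}};
  std::vector<long long> rows = {7, 3};  // deliberately inconsistent
  try {
    CheckRowMapConsistency(id_to_index, rows);
  } catch (const std::logic_error& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}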

paddle/fluid/framework/selected_rows.h
@@ -82,7 +82,8 @@ class SelectedRows {
   int64_t Index(int64_t key) const {
     auto it = std::find(rows_.begin(), rows_.end(), key);
     if (it == rows_.end()) {
-      PADDLE_THROW("id %s not in table", key);
+      PADDLE_THROW(platform::errors::NotFound(
+          "Input id (%lld) is not in current rows table.", key));
     }
     return static_cast<int64_t>(std::distance(rows_.begin(), it));
   }
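The Index() fix also repairs a format-specifier mismatch: the old message printed an int64_t key with %s, while the new NotFound error uses %lld. A minimal sketch of the same lookup with the standard library only; IndexOf is a hypothetical free function, not the SelectedRows member.

// Sketch: find a key in a row list, report a NotFound-style error otherwise.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

int64_t IndexOf(const std::vector<int64_t>& rows, int64_t key) {
  auto it = std::find(rows.begin(), rows.end(), key);
  if (it == rows.end()) {
    char msg[128];
    std::snprintf(msg, sizeof(msg),
                  "Input id (%lld) is not in current rows table.",
                  static_cast<long long>(key));
    throw std::out_of_range(msg);
  }
  return static_cast<int64_t>(std::distance(rows.begin(), it));
}

int main() {
  std::vector<int64_t> rows = {7, 3, 11};
  std::printf("index of 11 = %lld\n",
              static_cast<long long>(IndexOf(rows, 11)));  // prints 2
  try {
    IndexOf(rows, 42);
  } catch (const std::out_of_range& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}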

paddle/fluid/framework/shape_inference.cc
@@ -25,20 +25,22 @@ namespace framework {
 std::vector<DDim> InferShapeContext::GetReaderDims(
     const std::string& name) const {
   const std::vector<std::string>& arg_names = Inputs(name);
-  PADDLE_ENFORCE_EQ(
-      arg_names.size(), 1UL,
-      "Reader input '%s' should hold one element, but now it holds %d", name,
-      arg_names.size());
+  PADDLE_ENFORCE_EQ(
+      arg_names.size(), 1UL,
+      platform::errors::InvalidArgument(
+          "Reader input '%s' should hold one element, but now it "
+          "holds %d elements.",
+          name, arg_names.size()));
   return this->GetRepeatedDims(arg_names[0]);
 }

 void InferShapeContext::SetReaderDims(const std::string& name,
                                       const std::vector<DDim>& dims) {
   const std::vector<std::string>& arg_names = Outputs(name);
-  PADDLE_ENFORCE_EQ(
-      arg_names.size(), 1UL,
-      "Reader output '%s' should hold one element, but now it holds %d", name,
-      arg_names.size());
+  PADDLE_ENFORCE_EQ(
+      arg_names.size(), 1UL,
+      platform::errors::InvalidArgument(
+          "Reader output '%s' should hold one element, but now "
+          "it holds %d elements.",
+          name, arg_names.size()));
   return this->SetRepeatedDims(arg_names[0], dims);
 }
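Both hunks here enforce the same arity rule: a reader input or output must bind exactly one argument name. A small sketch of that check with the standard library; SingleReaderArg is a hypothetical helper, and %zu is used below because arg_names.size() is a size_t.

// Sketch: an argument must bind exactly one name, else report name and count.
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

std::string SingleReaderArg(const std::string& name,
                            const std::vector<std::string>& arg_names) {
  if (arg_names.size() != 1UL) {
    char msg[160];
    std::snprintf(msg, sizeof(msg),
                  "Reader input '%s' should hold one element, but now it "
                  "holds %zu elements.",
                  name.c_str(), arg_names.size());
    throw std::invalid_argument(msg);
  }
  return arg_names[0];
}

int main() {
  try {
    SingleReaderArg("Data", {"reader_a", "reader_b"});  // two names: error
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}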

paddle/fluid/framework/tensor_util.cc
@@ -94,9 +94,17 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
     auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
     auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true);
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx_place), true,
+        platform::errors::PreconditionNotMet(
+            "Context place error, excepted GPUPlace, but actually %s.",
+            ctx_place));
     auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
-    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
+    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
+                      platform::errors::Unavailable(
+                          "Source place and context place do not match, source "
+                          "place is %s, context place is %s.",
+                          src_gpu_place, ctx_gpu_place));
     auto stream =
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
@@ -106,9 +114,17 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
     auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
     auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true);
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx_place), true,
+        platform::errors::PreconditionNotMet(
+            "Context place error, excepted GPUPlace, but actually %s.",
+            ctx_place));
     auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
-    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
+    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
+                      platform::errors::Unavailable(
+                          "Destination place and context place do not match, "
+                          "destination place is %s, context place is %s.",
+                          dst_gpu_place, ctx_gpu_place));
     auto stream =
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
@@ -164,7 +180,11 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
     auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
     auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
     auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true);
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx_place), true,
+        platform::errors::PreconditionNotMet(
+            "Context place error, excepted GPUPlace, but actually %s.",
+            ctx_place));
     auto stream =
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
     if (platform::is_same_place(src_place, dst_place)) {
@@ -180,12 +200,14 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
         memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                      stream);
       } else {
-        PADDLE_THROW("ctx is not belong to dst_gpu_place or src_gpu_place.");
+        PADDLE_THROW(platform::errors::Unavailable(
+            "Context place dose not match the source and destination place."));
       }
     }
   }
   else {  // NOLINT
-    PADDLE_THROW("Copy from %s to %s is not supported.", src_place, dst_place);
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Copying from %s to %s is not supported.", src_place, dst_place));
   }
 #endif
 }
@@ -298,7 +320,8 @@ void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                  nullptr);
   }
   else {  // NOLINT
-    PADDLE_THROW("Copy from %s to %s is not supported.", src_place, dst_place);
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Copy from %s to %s is not supported.", src_place, dst_place));
   }
 #endif
 }
@@ -832,7 +855,9 @@ void TensorFromStream(std::istream& is, Tensor* tensor,
 void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
                             const platform::Place& dst_place) {
   // vector types not currently supported
-  PADDLE_ENFORCE_LE(type.lanes, 1, "vector types not currently supported");
+  PADDLE_ENFORCE_LE(type.lanes, 1,
+                    platform::errors::Unimplemented(
+                        "Vector type is not supported currently."));
   switch (type.bits) {
     case 8:
@@ -840,32 +865,37 @@ void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
         return static_cast<void*>(dst->mutable_data<int8_t>(dst_place));
       if (type.code == kDLUInt)
         return static_cast<void*>(dst->mutable_data<uint8_t>(dst_place));
-      PADDLE_THROW("There is no this type.code <%d> when type.bits is <%d>.",
-                   type.code, type.bits);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
+          type.code, type.bits));
     case 16:
       if (type.code == kDLInt)
         return static_cast<void*>(dst->mutable_data<int16_t>(dst_place));
       if (type.code == kDLFloat)
         return static_cast<void*>(
             dst->mutable_data<paddle::platform::float16>(dst_place));
-      PADDLE_THROW("There is no this type.code <%d> when type.bits is <%d>.",
-                   type.code, type.bits);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
+          type.code, type.bits));
     case 32:
       if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int32_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<float>(dst_place));
-      PADDLE_THROW("There is no this type.code <%d> when type.bits is <%d>.",
-                   type.code, type.bits);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
+          type.code, type.bits));
     case 64:
       if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int64_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<double>(dst_place));
-      PADDLE_THROW("There is no this type.code <%d> when type.bits is <%d>.",
-                   type.code, type.bits);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
+          type.code, type.bits));
     default:
-      PADDLE_THROW("Unsupport type.bits %d", type.bits);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported DLDataType.bits %d.", type.bits));
   }
 }
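The GetDstPtrByDLDataType hunks show the other common case in this commit: a switch over (bits, code) pairs where every unreachable combination now raises an Unimplemented error naming both fields. Below is a self-contained sketch of the same dispatch shape, using a hypothetical MiniDLType struct rather than the real DLPack DLDataType or Paddle's tensor API.

// Sketch: map (bits, code) to an element type name, or throw naming both fields.
#include <cstdio>
#include <stdexcept>
#include <string>

enum Code { kInt = 0, kUInt = 1, kFloat = 2 };

struct MiniDLType {  // hypothetical stand-in for DLDataType
  int code;
  int bits;
  int lanes;
};

std::string ElementTypeName(const MiniDLType& type) {
  if (type.lanes > 1) {
    throw std::runtime_error("Vector type is not supported currently.");
  }
  switch (type.bits) {
    case 32:
      if (type.code == kInt) return "int32_t";
      if (type.code == kFloat) return "float";
      break;
    case 64:
      if (type.code == kInt) return "int64_t";
      if (type.code == kFloat) return "double";
      break;
    default:
      throw std::runtime_error("Unsupported bits " +
                               std::to_string(type.bits) + ".");
  }
  // Reached when bits is known but the code has no mapping for it.
  throw std::runtime_error("Type code <" + std::to_string(type.code) +
                           "> is illegal when bits is <" +
                           std::to_string(type.bits) + ">.");
}

int main() {
  MiniDLType f32{kFloat, 32, 1};
  std::printf("%s\n", ElementTypeName(f32).c_str());  // prints "float"
  MiniDLType bad{kUInt, 64, 1};
  try {
    ElementTypeName(bad);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}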

paddle/fluid/framework/tensor_util.h
@@ -183,7 +183,11 @@ void TensorToVector(const Tensor& src, std::vector<T>* dst) {
   dst->resize(src.numel());
   auto dst_ptr = static_cast<void*>(dst->data());
-  PADDLE_ENFORCE_EQ(platform::is_cpu_place(src.place()), true);
+  PADDLE_ENFORCE_EQ(
+      platform::is_cpu_place(src.place()), true,
+      platform::errors::InvalidArgument(
+          "The input tensor should be CPU device, but actually it is in %s.",
+          src.place()));
   memory::Copy(dst_place, dst_ptr,
                BOOST_GET_CONST(platform::CPUPlace, src.place()), src_ptr,
                size);
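TensorToVector's new check documents a precondition that previously failed with no message at all: the source tensor must already be on the CPU before the raw copy into the std::vector, and the new error says where the tensor actually lives. A short sketch of that guard follows; CopyToVector and the Place enum are hypothetical, not Paddle's API.

// Sketch: verify the source lives on the CPU before a raw copy into a vector.
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <stdexcept>
#include <vector>

enum class Place { kCPU, kGPU };

void CopyToVector(const float* src, size_t n, Place src_place,
                  std::vector<float>* dst) {
  if (src_place != Place::kCPU) {
    throw std::invalid_argument(
        "The input tensor should be CPU device, but actually it is elsewhere.");
  }
  dst->resize(n);
  std::memcpy(dst->data(), src, n * sizeof(float));
}

int main() {
  float src[3] = {1.f, 2.f, 3.f};
  std::vector<float> dst;
  CopyToVector(src, 3, Place::kCPU, &dst);
  std::printf("%f %f %f\n", dst[0], dst[1], dst[2]);
  return 0;
}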