PaddlePaddle / Paddle
Commit ba997b8c
Unverified commit ba997b8c, authored on Jul 14, 2018 by Yu Yang, committed by yuyang18 on Jul 14, 2018.

Merge pull request #12097 from reyoung/feature/hide_api_cont

Hide internal API of LoDTensors, Clipping, etc.

Parents: bc8c7ccd, 4ff1bde5
13 changed files with 244 additions and 210 deletions (+244 -210):

    paddle/fluid/operators/reader/open_files_op.cc                    +158 -119
    paddle/fluid/pybind/pybind.cc                                      +14  -14
    python/paddle/fluid/clip.py                                        +15  -15
    python/paddle/fluid/layer_helper.py                                +11  -11
    python/paddle/fluid/param_attr.py                                   +9   -9
    python/paddle/fluid/regularizer.py                                  +1   -4
    python/paddle/fluid/tests/unittests/op_test.py                      +8   -8
    python/paddle/fluid/tests/unittests/test_batch_norm_op.py           +0   -1
    python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py     +7   -7
    python/paddle/fluid/tests/unittests/test_selected_rows.py           +3   -3
    python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py       +2   -2
    python/paddle/fluid/tests/unittests/test_tensor.py                 +15  -16
    python/paddle/fluid/tests/unittests/testsuite.py                    +1   -1
paddle/fluid/operators/reader/open_files_op.cc    (view file @ ba997b8c)

@@ -12,8 +12,10 @@  The include list gains ThreadPool.h and the framework-level blocking queue used by the new reader containers:

    // See the License for the specific language governing permissions and
    // limitations under the License.

    #include <cmath>
    #include <thread>  // NOLINT

    #include "ThreadPool.h"
    #include "paddle/fluid/framework/blocking_queue.h"
    #include "paddle/fluid/operators/reader/blocking_queue.h"
    #include "paddle/fluid/operators/reader/reader_op_registry.h"

@@ -21,141 +23,171 @@ namespace paddle {

The old MultiFileReader owned all of its scheduling machinery: a scheduler thread, one prefetch thread per slot, and three heap-allocated blocking queues. Removed:

    class MultiFileReader : public framework::ReaderBase {
     public:
      MultiFileReader(const std::vector<std::string>& file_names,
                      size_t thread_num, size_t buffer_size)
          : buffer_size_(buffer_size) {
        readers_.reserve(file_names.size());
        for (const std::string& f_name : file_names) {
          readers_.emplace_back(CreateReaderByFileName(f_name));
        }
        prefetchers_.resize(thread_num);
        StartNewScheduler();
      }

      void ReadNextImpl(std::vector<framework::LoDTensor>* out) override;

      ~MultiFileReader() { EndScheduler(); }

     private:
      void ShutdownImpl() override { EndScheduler(); }
      void StartImpl() override { StartNewScheduler(); }

      void StartNewScheduler();
      void EndScheduler();
      void ScheduleThreadFunc();
      void PrefetchThreadFunc(size_t reader_idx, size_t thread_idx);

      std::vector<std::unique_ptr<framework::ReaderBase>> readers_;
      std::thread scheduler_;
      std::vector<std::thread> prefetchers_;
      size_t buffer_size_;
      reader::BlockingQueue<size_t>* waiting_reader_idx_;
      reader::BlockingQueue<size_t>* available_thread_idx_;
      reader::BlockingQueue<std::vector<framework::LoDTensor>>* buffer_;
    };

    void MultiFileReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
      if (!buffer_->Receive(out)) {
        out->clear();
      }
    }

    void MultiFileReader::StartNewScheduler() {
      size_t thread_num = prefetchers_.size();
      waiting_reader_idx_ = new reader::BlockingQueue<size_t>(readers_.size());
      available_thread_idx_ = new reader::BlockingQueue<size_t>(thread_num);
      buffer_ = new reader::BlockingQueue<std::vector<framework::LoDTensor>>(
          buffer_size_);

      for (size_t i = 0; i < readers_.size(); ++i) {
        waiting_reader_idx_->Send(i);
      }
      waiting_reader_idx_->Close();
      for (size_t i = 0; i < thread_num; ++i) {
        available_thread_idx_->Send(i);
      }

      scheduler_ = std::thread([this] { ScheduleThreadFunc(); });
    }

    void MultiFileReader::EndScheduler() {
      available_thread_idx_->Close();
      buffer_->Close();
      waiting_reader_idx_->Close();
      if (scheduler_.joinable()) {
        scheduler_.join();
      }
      delete buffer_;
      delete available_thread_idx_;
      delete waiting_reader_idx_;
    }

    void MultiFileReader::ScheduleThreadFunc() {
      VLOG(5) << "MultiFileReader schedule thread starts.";
      size_t completed_thread_num = 0;
      size_t thread_idx;
      while (available_thread_idx_->Receive(&thread_idx)) {
        std::thread& prefetcher = prefetchers_[thread_idx];
        if (prefetcher.joinable()) {
          prefetcher.join();
        }
        size_t reader_idx;
        if (waiting_reader_idx_->Receive(&reader_idx)) {
          // Still have files to read. Start a new prefetch thread.
          prefetcher = std::thread([this, reader_idx, thread_idx] {
            PrefetchThreadFunc(reader_idx, thread_idx);
          });
        } else {
          // No more file to read.
          ++completed_thread_num;
          if (completed_thread_num == prefetchers_.size()) {
            buffer_->Close();
            break;
          }
        }
      }
      // If users invoke Shutdown() when scheduler is running, it will close the
      // 'avaiable_thread_idx_' and prefecther threads have no way to tell
      // scheduler to release their resource. So a check is needed before
      // scheduler ends.
      for (auto& p : prefetchers_) {
        if (p.joinable()) {
          p.join();
        }
      }
      VLOG(5) << "MultiFileReader schedule thread terminates.";
    }

    void MultiFileReader::PrefetchThreadFunc(size_t reader_idx,
                                             size_t thread_idx) {
      VLOG(5) << "The prefetch thread of file idx '" << reader_idx
              << "' starts.";
      std::unique_ptr<framework::ReaderBase>& reader = readers_[reader_idx];
      while (true) {
        std::vector<framework::LoDTensor> ins;
        reader->ReadNext(&ins);
        if (ins.empty()) {
          reader->Shutdown();
          reader->Start();
          break;
        }
        try {
          buffer_->Send(std::move(ins));
        } catch (paddle::platform::EnforceNotMet e) {
          VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch "
                     "thread of file idx '"
                  << reader_idx << "' will terminate.";
          break;
        }
      }
      if (!available_thread_idx_->Send(thread_idx)) {
        VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. "
                   "Fail to send thread_idx.";
      }
      VLOG(5) << "The prefetch thread of file idx '" << reader_idx
              << "' terminates.";
    }

The replacement factors the scheduling policy out behind an IReaderContainer interface with two implementations: OrderedReaderContainer (files are drained strictly in order, used for test mode) and PreemptiveReaderContainer (a thread pool prefetches from all files and whichever batch completes first is returned). MultiFileReader becomes a thin wrapper that delegates to a container. Added:

    class IReaderContainer {
     public:
      virtual ~IReaderContainer() {}
      virtual void AppendReader(
          std::unique_ptr<framework::ReaderBase>&& readers) = 0;
      virtual void Stop() = 0;
      virtual void Start() = 0;
      virtual void ReadNext(std::vector<framework::LoDTensor>* out) = 0;
    };

    class OrderedReaderContainer : public IReaderContainer {
     public:
      void AppendReader(
          std::unique_ptr<framework::ReaderBase>&& reader) override {
        pending_.emplace(std::move(reader));
      }

      void Stop() override {
        while (!pending_.empty()) {
          MoveFrontPendingToDone();
        }
      }

      void Start() override { std::swap(done_, pending_); }

      void ReadNext(std::vector<framework::LoDTensor>* out) override {
        if (!pending_.empty()) {
          pending_.front()->ReadNext(out);
          if (out->empty()) {
            MoveFrontPendingToDone();
            ReadNext(out);
          }
        } else {
          out->clear();
        }
      }

     private:
      void MoveFrontPendingToDone() {
        pending_.front()->Shutdown();
        pending_.front()->Start();
        done_.emplace(move(pending_.front()));
        pending_.pop();
      }

      std::queue<std::unique_ptr<framework::ReaderBase>> pending_;
      std::queue<std::unique_ptr<framework::ReaderBase>> done_;
    };

    class PreemptiveReaderContainer : public IReaderContainer {
      using ReaderList = std::list<std::unique_ptr<framework::ReaderBase>>;

      struct FutureItem {
        std::vector<framework::LoDTensor> data_;
        ReaderList::iterator reader_it_;
      };

      using FutureList = std::list<std::future<FutureItem>>;

     public:
      explicit PreemptiveReaderContainer(size_t thread_num) : pool_(thread_num) {}

      void Stop() override {
        if (!pending_.empty()) {
          for (auto& reader : pending_) {
            reader->Shutdown();
          }
          for (auto& fu : futures_) {
            fu.wait();
          }
          futures_.clear();
          for (auto& reader : pending_) {
            reader->Start();
            done_.emplace_back(std::move(reader));
          }
          pending_.clear();
          bool timeout;
          complete_queue_.PopAll(1000, &timeout);
          PADDLE_ENFORCE(!timeout);
        }
      }

      void Start() override {
        for (auto& reader : done_) {
          AppendReader(std::move(reader));
        }
        done_.clear();
      }

      void ReadNext(std::vector<framework::LoDTensor>* out) override {
        if (!pending_.empty()) {
          auto future_it = complete_queue_.Pop();
          FutureItem item = future_it->get();
          if (item.data_.empty()) {  // reader done.
            done_.emplace_back(std::move(*item.reader_it_));
            pending_.erase(item.reader_it_);
            futures_.erase(future_it);
            ReadNext(out);
          } else {
            *out = item.data_;
            // continue read async
            AsyncRead(item.reader_it_, &future_it);
          }
        } else {
          out->clear();
        }
      }

     private:
      void AppendReader(
          std::unique_ptr<framework::ReaderBase>&& readers) override {
        pending_.emplace_back();
        auto reader_it = pending_.end();
        --reader_it;

        futures_.emplace_back();
        auto future_it = futures_.end();
        --future_it;

        AsyncRead(reader_it, &future_it);
      }

      void AsyncRead(const ReaderList::iterator& reader_it,
                     FutureList::iterator* future_it_ptr) {
        auto& future_it = *future_it_ptr;
        *future_it = pool_.enqueue([reader_it, future_it, this] {
          FutureItem item;
          item.reader_it_ = reader_it;
          (*reader_it)->ReadNext(&item.data_);
          if (item.data_.empty()) {
            (*reader_it)->Shutdown();
            (*reader_it)->Start();
          }
          complete_queue_.Push(future_it);
          return item;
        });
      }

      FutureList futures_;
      ThreadPool pool_;
      framework::BlockingQueue<FutureList::iterator> complete_queue_;
      std::list<std::unique_ptr<framework::ReaderBase>> pending_;
      std::list<std::unique_ptr<framework::ReaderBase>> done_;
    };

    class MultiFileReader : public framework::ReaderBase {
     public:
      MultiFileReader(const std::vector<std::string>& file_names,
                      std::unique_ptr<IReaderContainer>&& container)
          : container_(std::move(container)) {
        for (auto& fn : file_names) {
          container_->AppendReader(CreateReaderByFileName(fn));
        }
      }

      ~MultiFileReader() { container_->Stop(); }

     protected:
      void ReadNextImpl(std::vector<framework::LoDTensor>* out) override {
        container_->ReadNext(out);
      }

      void ShutdownImpl() override { container_->Stop(); }

      void StartImpl() override { container_->Start(); }

     private:
      std::unique_ptr<IReaderContainer> container_;
    };

@@ -173,13 +205,22 @@ class OpenFilesOp : public framework::OperatorBase {

OpenFilesOp now selects a container from a new is_test attribute instead of taking thread_num and buffer_size. Before:

        const auto& file_names = Attr<std::vector<std::string>>("file_names");
        PADDLE_ENFORCE(!file_names.empty(), "No file to be read!");
        const size_t thread_num = Attr<int>("thread_num");
        const size_t buffer_size = Attr<int>("buffer_size");
        auto* out = scope.FindVar(Output("Out"))
                        ->template GetMutable<framework::ReaderHolder>();
        out->Reset(
            std::make_shared<MultiFileReader>(file_names, thread_num, buffer_size));

After:

        const auto& file_names = Attr<std::vector<std::string>>("file_names");
        PADDLE_ENFORCE(!file_names.empty(), "No file to be read!");
        bool is_test = Attr<bool>("is_test");

        auto* out = scope.FindVar(Output("Out"))
                        ->template GetMutable<framework::ReaderHolder>();
        std::unique_ptr<IReaderContainer> container;

        if (is_test) {
          container.reset(new OrderedReaderContainer());
        } else {
          container.reset(new PreemptiveReaderContainer(
              std::min(file_names.size(),
                       static_cast<size_t>(std::thread::hardware_concurrency()))));
        }

        out->Reset(
            std::make_shared<MultiFileReader>(file_names, std::move(container)));

@@ -187,9 +228,7 @@ class OpenFilesOpMaker : public FileReaderMakerBase {  The op maker changes accordingly:

     protected:
      void Apply() override {
        AddAttr<std::vector<std::string>>("file_names", "Files to be read.");
-       AddAttr<int>("thread_num", "The maximal concurrent prefetch thread number.")
-           .GreaterThan(0);
-       AddAttr<int>("buffer_size", "The size of prefetch buffer.").GreaterThan(0);
+       AddAttr<bool>("is_test", "Used for testing data.").SetDefault(false);
        AddComment(R"DOC(
    OpenFiles Operator
    ...
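To make the rotation logic concrete, here is a minimal Python sketch of the ordered container's behavior; it is a reading aid for the C++ above, not Paddle code, with hypothetical read_next()/shutdown()/start() reader methods standing in for the ReaderBase interface and None standing in for an empty batch:

    from collections import deque

    class OrderedReaderContainerSketch(object):
        """Readers are drained strictly one after another; an exhausted
        reader is reset and parked so that start() can replay the whole
        pass from the beginning."""

        def __init__(self):
            self._pending = deque()
            self._done = deque()

        def append_reader(self, reader):
            self._pending.append(reader)

        def read_next(self):
            if not self._pending:
                return None                          # every file exhausted
            batch = self._pending[0].read_next()
            if batch is None:                        # front reader drained,
                self._move_front_pending_to_done()   # rotate it out and
                return self.read_next()              # fall through to the next
            return batch

        def start(self):
            # Swapping the queues replays a finished pass from the start.
            self._pending, self._done = self._done, self._pending

        def stop(self):
            while self._pending:
                self._move_front_pending_to_done()

        def _move_front_pending_to_done(self):
            reader = self._pending.popleft()
            reader.shutdown()
            reader.start()
            self._done.append(reader)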
paddle/fluid/pybind/pybind.cc    (view file @ ba997b8c)

The raw Tensor manipulation methods exposed to Python are renamed with a leading underscore to mark them as internal; the bound lambdas themselves are unchanged.

@@ -87,37 +87,37 @@ PYBIND11_PLUGIN(core) {
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-      .def("get_dims",
+      .def("_get_dims",
            [](const Tensor &self) { return vectorize(self.dims()); })
-      .def("set_dims",
+      .def("_set_dims",
            [](Tensor &self, const std::vector<int64_t> &dim) {
              self.Resize(make_ddim(dim));
            })
-      .def("set_layout",
+      .def("_set_layout",
            [](Tensor &self, const std::string &layout) {
              self.set_layout(StringToDataLayout(layout));
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<float>(place);
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<float>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_int",
+      .def("_alloc_int",
            [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
              self.mutable_data<int>(place);
            })
-      .def("alloc_float",
+      .def("_alloc_float",
            [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
              self.mutable_data<float>(place);
            })

@@ -145,11 +145,11 @@ PYBIND11_PLUGIN(core) {  The element accessors and the dtype query get the same treatment, while "shape" and "set" stay public:

       .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-      .def("set_float_element", TensorSetElement<float>)
-      .def("get_float_element", TensorGetElement<float>)
-      .def("set_double_element", TensorSetElement<double>)
-      .def("get_double_element", TensorGetElement<double>)
-      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });
+      .def("_set_float_element", TensorSetElement<float>)
+      .def("_get_float_element", TensorGetElement<float>)
+      .def("_set_double_element", TensorSetElement<double>)
+      .def("_get_double_element", TensorGetElement<double>)
+      .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });

   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
...
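On the Python side the change is mechanical; a minimal before/after sketch mirroring the updated unit tests further below:

    import numpy
    import paddle.fluid.core as core

    scope = core.Scope()
    var = scope.var("test_tensor")
    tensor = var.get_tensor()

    tensor._set_dims([1000, 784])         # was: tensor.set_dims([1000, 784])
    tensor._alloc_float(core.CPUPlace())  # was: tensor.alloc_float(place)

    tensor_array = numpy.array(tensor)    # the public buffer protocol and
    print(tensor_array.shape)             # shape()/set() are untouched: (1000, 784)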
python/paddle/fluid/clip.py    (view file @ ba997b8c)

The hooks that the framework invokes on clip attributes become private (append_clip_op to _append_clip_op, process_context to _process_context, create_operators to _create_operators) in the base classes, every subclass, and their call sites.

@@ -31,7 +31,7 @@ class BaseErrorClipAttr(object):
     def __str__(self):
         raise NotImplementedError()
 
-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         raise NotImplementedError()

@@ -67,7 +67,7 @@ class ErrorClipByValue(BaseErrorClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)
 
-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         clip_op_desc = block.desc.append_op()
         clip_op_desc.set_type("clip")
         clip_op_desc.set_input("X", [grad_name])

@@ -90,17 +90,17 @@ def error_clip_callback(block, context):
             "Variable's error_clip should be an instance of BaseErrorClipAttr or None."
         )
         if error_clip is not None:
-            error_clip.append_clip_op(block, grad_n)
+            error_clip._append_clip_op(block, grad_n)
 
 
 class BaseGradientClipAttr(object):
     def __str__(self):
         raise NotImplementedError()
 
-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         raise NotImplementedError()
 
-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         raise NotImplementedError()

@@ -108,10 +108,10 @@ class NullGradientClipAttr(BaseGradientClipAttr):
     def __str__(self):
         return "Null"
 
-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass
 
-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         return param, grad

@@ -153,10 +153,10 @@ class GradientClipByValue(BaseGradientClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)
 
-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass
 
-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip(x=grad, min=self.min, max=self.max)
         return param, new_grad

@@ -199,10 +199,10 @@ class GradientClipByNorm(BaseGradientClipAttr):
     def __str__(self):
         return "ByNorm, clip_norm=%f" % self.clip_norm
 
-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass
 
-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm)
         return param, new_grad

@@ -257,7 +257,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name,
                                                               self.clip_norm)
 
-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         if self.group_name not in context:
             context[self.group_name] = []
             context[self.group_name + "_clip_value"] = self.clip_norm

@@ -274,7 +274,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         self.context = context
 
-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])

@@ -336,12 +336,12 @@ def append_gradient_clip_ops(param_grad):
                 "clip attribute should be an instance of BaseGradientClipAttr"
             )
 
-        clip_attr.process_context(context=context, param=p, grad=g)
+        clip_attr._process_context(context=context, param=p, grad=g)
 
     res = []
     for p, g in param_grad:
         with p.block.program.optimized_guard(p):
-            res.append(clip_attr.create_operators(param=p, grad=g))
+            res.append(clip_attr._create_operators(param=p, grad=g))
 
     return res
...
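Code that subclasses these attribute classes must rename its overrides to match. A hypothetical sketch (ClipGradByHalving is illustrative only, not part of Paddle):

    import paddle.fluid.clip as clip
    import paddle.fluid.layers as layers

    class ClipGradByHalving(clip.BaseGradientClipAttr):
        """Scales every gradient by 0.5, a toy policy showing which
        hooks a subclass now overrides."""

        def __str__(self):
            return "ByHalving"

        def _process_context(self, context, param, grad):  # was: process_context
            pass  # no cross-parameter state needed

        def _create_operators(self, param, grad):          # was: create_operators
            return param, layers.scale(x=grad, scale=0.5)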
python/paddle/fluid/layer_helper.py    (view file @ ba997b8c)

LayerHelper switches to the underscored ParamAttr helpers (_to_attr, _to_kwargs, _set_default_*_initializer).

@@ -68,11 +68,11 @@ class LayerHelper(object):
     @property
     def param_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('param_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('param_attr', None))
 
     @property
     def bias_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('bias_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('bias_attr', None))
 
     def multiple_param_attr(self, length):
         param_attr = self.param_attr

@@ -262,11 +262,11 @@ class LayerHelper(object):
             g_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=g_param_shape,
-                **g_param_attr.to_kwargs(with_initializer=False))
+                **g_param_attr._to_kwargs(with_initializer=False))
             v_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=v_param_shape,
-                **v_param_attr.to_kwargs(with_initializer=True))
+                **v_param_attr._to_kwargs(with_initializer=True))
             __norm_except_dim(
                 x=v_param,
                 out=g_param,

@@ -275,9 +275,9 @@ class LayerHelper(object):
             # Add weight normalization to main_program
             g_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs())
+                dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs())
             v_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs())
+                dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs())
             w_param = __weight_normalize(g_param, v_param, dim=attr.dim)
             return w_param

@@ -296,11 +296,11 @@ class LayerHelper(object):
         if default_initializer is None and attr.initializer is None:
             if is_bias:
-                attr.set_default_bias_initializer()
+                attr._set_default_bias_initializer()
             else:
-                attr.set_default_param_initializer()
+                attr._set_default_param_initializer()
         else:
-            attr.set_default_initializer(default_initializer)
+            attr._set_default_initializer(default_initializer)
 
         # If weight normalization is set, insert extra parameters and ops.
         # Refer to https://arxiv.org/pdf/1602.07868.pdf

@@ -310,9 +310,9 @@ class LayerHelper(object):
             return param
         self.startup_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True))
+            dtype=dtype, shape=shape, **attr._to_kwargs(with_initializer=True))
         return self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs())
+            dtype=dtype, shape=shape, **attr._to_kwargs())
 
     def get_parameter(self, name):
         param = self.main_program.global_block().var(name)
...
python/paddle/fluid/param_attr.py    (view file @ ba997b8c)

The ParamAttr helper methods used by LayerHelper become private.

@@ -67,7 +67,7 @@ class ParamAttr(object):
         self.gradient_clip = gradient_clip
         self.model_average = do_model_average
 
-    def set_default_initializer(self, initializer):
+    def _set_default_initializer(self, initializer):
         """
         Set the default initializer, the initializer should be Constant,
         Uniform, Normal, Xavier, MSRA.

@@ -88,7 +88,7 @@ class ParamAttr(object):
         self.initializer = initializer
 
-    def set_default_param_initializer(self):
+    def _set_default_param_initializer(self):
         """
         Set the default initializer for the parameter with Xavier.

@@ -98,9 +98,9 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Xavier())
+        self._set_default_initializer(Xavier())
 
-    def set_default_bias_initializer(self):
+    def _set_default_bias_initializer(self):
         """
         Set the default initializer for the bias with Constant(0.0).

@@ -110,10 +110,10 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Constant(0.0))
+        self._set_default_initializer(Constant(0.0))
 
     @staticmethod
-    def to_attr(arg):
+    def _to_attr(arg):
         """
         Create ParamAttr[s].

@@ -131,7 +131,7 @@ class ParamAttr(object):
         if arg is None:
             return ParamAttr()
         elif isinstance(arg, list) or isinstance(arg, tuple):
-            return [ParamAttr.to_attr(a) for a in arg]
+            return [ParamAttr._to_attr(a) for a in arg]
         elif isinstance(arg, ParamAttr):
             return arg
         elif isinstance(arg, str) or isinstance(arg, unicode):

@@ -141,11 +141,11 @@ class ParamAttr(object):
         elif isinstance(arg, WeightDecayRegularizer):
             return ParamAttr(regularizer=arg)
         elif isinstance(arg, bool):
-            return ParamAttr.to_attr(None) if arg else False
+            return ParamAttr._to_attr(None) if arg else False
         else:
             raise TypeError("{0} cast to ParamAttr".format(type(arg)))
 
-    def to_kwargs(self, with_initializer=False):
+    def _to_kwargs(self, with_initializer=False):
         """
         Returns the attributes of this parameter.
...
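The conversion performed by _to_attr is easiest to read off by example; a sketch of the accepted inputs (the str/unicode branch body is elided in this diff, but judging from the surrounding code it yields a named ParamAttr):

    from paddle.fluid.param_attr import ParamAttr
    from paddle.fluid.regularizer import L2Decay

    ParamAttr._to_attr(None)           # a fresh ParamAttr()
    ParamAttr._to_attr(ParamAttr())    # returned unchanged
    ParamAttr._to_attr(['w1', 'w2'])   # one ParamAttr per element
    ParamAttr._to_attr(L2Decay(1e-4))  # ParamAttr(regularizer=L2Decay(1e-4))
    ParamAttr._to_attr(False)          # False (attribute disabled)
    ParamAttr._to_attr(True)           # same as _to_attr(None)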
python/paddle/fluid/regularizer.py    (view file @ ba997b8c)

append_regularization_ops is removed from the module's public export list:

@@ -15,10 +15,7 @@
 import framework
 from . import core
 
-__all__ = [
-    'append_regularization_ops', 'L1Decay', 'L2Decay', 'L1DecayRegularizer',
-    'L2DecayRegularizer'
-]
+__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']
 
 
 def append_regularization_ops(parameters_and_grads, regularization=None):
...
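Dropping the name from __all__ means star-imports of the module no longer re-export the helper, while explicit imports keep working since the function itself is unchanged:

    # Before this commit, 'from paddle.fluid.regularizer import *' also bound
    # append_regularization_ops; now only the Decay classes are exported.
    from paddle.fluid.regularizer import append_regularization_ops  # still valid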
python/paddle/fluid/tests/unittests/op_test.py    (view file @ ba997b8c)

get_numeric_gradient() switches to the public shape() accessor and to the underscored element accessors:

@@ -60,8 +60,8 @@ def get_numeric_gradient(place,
         return np.array(sum).mean()
 
     tensor_to_check = scope.find_var(input_to_check).get_tensor()
-    tensor_size = product(tensor_to_check.get_dims())
-    tensor_to_check_dtype = tensor_to_check.dtype()
+    tensor_size = product(tensor_to_check.shape())
+    tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
     elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:

@@ -74,15 +74,15 @@ def get_numeric_gradient(place,
     def __get_elem__(tensor, i):
         if tensor_to_check_dtype == np.float32:
-            return tensor.get_float_element(i)
+            return tensor._get_float_element(i)
         else:
-            return tensor.get_double_element(i)
+            return tensor._get_double_element(i)
 
     def __set_elem__(tensor, i, e):
         if tensor_to_check_dtype == np.float32:
-            tensor.set_float_element(i, e)
+            tensor._set_float_element(i, e)
         else:
-            tensor.set_double_element(i, e)
+            tensor._set_double_element(i, e)
 
     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.

@@ -107,7 +107,7 @@ def get_numeric_gradient(place,
         __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2
 
-    return gradient_flat.reshape(tensor_to_check.get_dims())
+    return gradient_flat.reshape(tensor_to_check.shape())
 
 
 class OpTest(unittest.TestCase):

@@ -125,7 +125,7 @@ class OpTest(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
-        '''Restore random seeds'''
+        """Restore random seeds"""
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)
...
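The numeric gradient here is an ordinary central difference; restated as a self-contained function over a plain numpy array (the same (y_pos - y_neg) / delta / 2 formula as the loop above, without the fluid Tensor plumbing):

    import numpy as np

    def numeric_gradient(f, x, delta=0.005):
        """Central-difference gradient of a scalar function f at x."""
        grad = np.zeros_like(x)
        flat_x = x.ravel()    # in-place view of x
        flat_g = grad.ravel()
        for i in range(x.size):
            origin = flat_x[i]
            flat_x[i] = origin + delta
            y_pos = f(x)
            flat_x[i] = origin - delta
            y_neg = f(x)
            flat_x[i] = origin                     # restore before moving on
            flat_g[i] = (y_pos - y_neg) / delta / 2
        return grad

    # For f(x) = sum(x**2) the exact gradient is 2*x:
    x = np.array([1.0, -2.0, 3.0])
    print(numeric_gradient(lambda v: (v ** 2).sum(), x))  # ~[ 2. -4.  6.]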
python/paddle/fluid/tests/unittests/test_batch_norm_op.py    (view file @ ba997b8c)

The explicit set_dims() call is removed:

@@ -129,7 +129,6 @@ def create_or_get_tensor(scope, var_name, var, place):
     if var is not None:
         assert isinstance(var, np.ndarray)
         tensor.set_recursive_sequence_lengths([])
-        tensor.set_dims(var.shape)
         tensor.set(var, place)
     return tensor
...
python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py    (view file @ ba997b8c)

@@ -65,10 +65,10 @@ class TestDyRnnStaticInput(unittest.TestCase):
         return self._lodtensor_to_ndarray(fetch_outs[0])
 
     def _lodtensor_to_ndarray(self, lod_tensor):
-        dims = lod_tensor.get_dims()
+        dims = lod_tensor.shape()
         ndarray = np.zeros(shape=dims).astype('float32')
         for i in xrange(np.product(dims)):
-            ndarray.ravel()[i] = lod_tensor.get_float_element(i)
+            ndarray.ravel()[i] = lod_tensor._get_float_element(i)
         return ndarray, lod_tensor.recursive_sequence_lengths()
 
     def build_graph(self, only_forward=False):

@@ -185,19 +185,19 @@ class TestDyRnnStaticInput(unittest.TestCase):
         actual_gradients, actual_lod = self.fetch_value(static_input_grad)
 
-        static_input_shape = self.static_input_tensor.get_dims()
+        static_input_shape = self.static_input_tensor.shape()
         numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
         # calculate numeric gradients
         tensor_size = np.product(static_input_shape)
         for i in xrange(tensor_size):
-            origin = self.static_input_tensor.get_float_element(i)
+            origin = self.static_input_tensor._get_float_element(i)
             x_pos = origin + self._delta
-            self.static_input_tensor.set_float_element(i, x_pos)
+            self.static_input_tensor._set_float_element(i, x_pos)
             y_pos = self.fetch_value(loss)[0][0]
             x_neg = origin - self._delta
-            self.static_input_tensor.set_float_element(i, x_neg)
+            self.static_input_tensor._set_float_element(i, x_neg)
             y_neg = self.fetch_value(loss)[0][0]
-            self.static_input_tensor.set_float_element(i, origin)
+            self.static_input_tensor._set_float_element(i, origin)
             numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
 
         self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
         self.assertTrue(
...
python/paddle/fluid/tests/unittests/test_selected_rows.py    (view file @ ba997b8c)

@@ -40,12 +40,12 @@ class TestSelectedRows(unittest.TestCase):
         # compare tensor
         self.assertAlmostEqual(2.0,
-                               selected_rows.get_tensor().get_float_element(0))
+                               selected_rows.get_tensor()._get_float_element(0))
         self.assertAlmostEqual(1.0,
-                               selected_rows.get_tensor().get_float_element(1))
+                               selected_rows.get_tensor()._get_float_element(1))
         self.assertAlmostEqual(
             4.0,
-            selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
+            selected_rows.get_tensor()._get_float_element(2 * row_numel + 8))
 
 
 if __name__ == "__main__":
...
python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py    (view file @ ba997b8c)

@@ -45,8 +45,8 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
     def sum_lodtensor(self, tensor):
         sum_res = 0.0
-        for i in xrange(np.product(tensor.get_dims())):
-            sum_res += tensor.get_float_element(i)
+        for i in xrange(np.product(tensor.shape())):
+            sum_res += tensor._get_float_element(i)
         return sum_res
...
python/paddle/fluid/tests/unittests/test_tensor.py    (view file @ ba997b8c)

@@ -25,8 +25,8 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()
 
-        tensor.set_dims([1000, 784])
-        tensor.alloc_int(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_int(place)
         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)
         tensor_array[3, 9] = 1

@@ -44,8 +44,8 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()
 
-        tensor.set_dims([1000, 784])
-        tensor.alloc_float(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_float(place)
         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)

@@ -63,8 +63,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
 
-        lod_tensor.set_dims([4, 4, 6])
-        lod_tensor.alloc_int(place)
+        lod_tensor._set_dims([4, 4, 6])
+        lod_tensor._alloc_int(place)
         array = numpy.array(lod_tensor)
         array[0, 0, 0] = 3
         array[3, 3, 5] = 10

@@ -84,8 +84,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
 
-        lod_tensor.set_dims([5, 2, 3, 4])
-        lod_tensor.alloc_float(place)
+        lod_tensor._set_dims([5, 2, 3, 4])
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         self.assertEqual((5, 2, 3, 4), tensor_array.shape)

@@ -104,14 +104,13 @@ class TestTensor(unittest.TestCase):
         self.assertListEqual(lod_py, lod)
 
     def test_lod_tensor_init(self):
-        scope = core.Scope()
         place = core.CPUPlace()
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()
 
-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0

@@ -129,9 +128,9 @@ class TestTensor(unittest.TestCase):
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()
 
-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0

@@ -149,15 +148,15 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()
 
-        tensor.set_dims([0, 1])
-        tensor.alloc_float(place)
+        tensor._set_dims([0, 1])
+        tensor._alloc_float(place)
 
         tensor_array = numpy.array(tensor)
         self.assertEqual((0, 1), tensor_array.shape)
 
         if core.is_compiled_with_cuda():
             gpu_place = core.CUDAPlace(0)
-            tensor.alloc_float(gpu_place)
+            tensor._alloc_float(gpu_place)
             tensor_array = numpy.array(tensor)
             self.assertEqual((0, 1), tensor_array.shape)
...
python/paddle/fluid/tests/unittests/testsuite.py    (view file @ ba997b8c)

@@ -75,7 +75,7 @@ def set_input(scope, op, inputs, place):
             if isinstance(var, tuple):
                 tensor.set_recursive_sequence_lengths(var[1])
                 var = var[0]
-            tensor.set_dims(var.shape)
+            tensor._set_dims(var.shape)
             tensor.set(var, place)
         elif isinstance(var, float):
             scope.find_var(var_name).set_float(var)
...