Commit ba997b8c (unverified)
Authored Jul 14, 2018 by Yu Yang
Committed by yuyang18 on Jul 14, 2018
Merge pull request #12097 from reyoung/feature/hide_api_cont
Hide internal API of LoDTensors, Clipping, etc.
Parents: bc8c7ccd, 4ff1bde5
Showing 13 changed files with 244 additions and 210 deletions (+244 -210)
paddle/fluid/operators/reader/open_files_op.cc  +158 -119
paddle/fluid/pybind/pybind.cc  +14 -14
python/paddle/fluid/clip.py  +15 -15
python/paddle/fluid/layer_helper.py  +11 -11
python/paddle/fluid/param_attr.py  +9 -9
python/paddle/fluid/regularizer.py  +1 -4
python/paddle/fluid/tests/unittests/op_test.py  +8 -8
python/paddle/fluid/tests/unittests/test_batch_norm_op.py  +0 -1
python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py  +7 -7
python/paddle/fluid/tests/unittests/test_selected_rows.py  +3 -3
python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py  +2 -2
python/paddle/fluid/tests/unittests/test_tensor.py  +15 -16
python/paddle/fluid/tests/unittests/testsuite.py  +1 -1
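The renames are mechanical: helpers that are really internal plumbing (the LoDTensor element accessors, the clipping and ParamAttr hooks, and so on) gain a leading underscore so they drop out of the public fluid API, and every in-tree caller is updated to match. A rough before/after sketch of the calling pattern, using the LoDTensor accessors renamed in this commit (illustrative only):

    # before this commit
    value = tensor.get_float_element(0)
    tensor.set_float_element(0, value + 1.0)

    # after this commit: the same helpers are underscore-prefixed to mark
    # them as internal rather than part of the supported fluid API
    value = tensor._get_float_element(0)
    tensor._set_float_element(0, value + 1.0)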
paddle/fluid/operators/reader/open_files_op.cc
@@ -12,8 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <cmath>
 #include <thread>  // NOLINT

+#include "ThreadPool.h"
+#include "paddle/fluid/framework/blocking_queue.h"
 #include "paddle/fluid/operators/reader/blocking_queue.h"
 #include "paddle/fluid/operators/reader/reader_op_registry.h"

@@ -21,141 +23,171 @@ namespace paddle {
 namespace operators {
 namespace reader {

-class MultiFileReader : public framework::ReaderBase {
- public:
-  MultiFileReader(const std::vector<std::string>& file_names,
-                  size_t thread_num, size_t buffer_size)
-      : buffer_size_(buffer_size) {
-    readers_.reserve(file_names.size());
-    for (const std::string& f_name : file_names) {
-      readers_.emplace_back(CreateReaderByFileName(f_name));
-    }
-    prefetchers_.resize(thread_num);
-    StartNewScheduler();
-  }
-
-  void ReadNextImpl(std::vector<framework::LoDTensor>* out) override;
-
-  ~MultiFileReader() { EndScheduler(); }
-
- private:
-  void ShutdownImpl() override { EndScheduler(); }
-  void StartImpl() override { StartNewScheduler(); }
-
-  void StartNewScheduler();
-  void EndScheduler();
-  void ScheduleThreadFunc();
-  void PrefetchThreadFunc(size_t reader_idx, size_t thread_idx);
-
-  std::vector<std::unique_ptr<framework::ReaderBase>> readers_;
-  std::thread scheduler_;
-  std::vector<std::thread> prefetchers_;
-  size_t buffer_size_;
-  reader::BlockingQueue<size_t>* waiting_reader_idx_;
-  reader::BlockingQueue<size_t>* available_thread_idx_;
-  reader::BlockingQueue<std::vector<framework::LoDTensor>>* buffer_;
-};
-
-void MultiFileReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
-  if (!buffer_->Receive(out)) {
-    out->clear();
-  }
-}
-
-void MultiFileReader::StartNewScheduler() {
-  size_t thread_num = prefetchers_.size();
-  waiting_reader_idx_ = new reader::BlockingQueue<size_t>(readers_.size());
-  available_thread_idx_ = new reader::BlockingQueue<size_t>(thread_num);
-  buffer_ = new reader::BlockingQueue<std::vector<framework::LoDTensor>>(
-      buffer_size_);
-
-  for (size_t i = 0; i < readers_.size(); ++i) {
-    waiting_reader_idx_->Send(i);
-  }
-  waiting_reader_idx_->Close();
-  for (size_t i = 0; i < thread_num; ++i) {
-    available_thread_idx_->Send(i);
-  }
-
-  scheduler_ = std::thread([this] { ScheduleThreadFunc(); });
-}
-
-void MultiFileReader::EndScheduler() {
-  available_thread_idx_->Close();
-  buffer_->Close();
-  waiting_reader_idx_->Close();
-  if (scheduler_.joinable()) {
-    scheduler_.join();
-  }
-  delete buffer_;
-  delete available_thread_idx_;
-  delete waiting_reader_idx_;
-}
-
-void MultiFileReader::ScheduleThreadFunc() {
-  VLOG(5) << "MultiFileReader schedule thread starts.";
-  size_t completed_thread_num = 0;
-  size_t thread_idx;
-  while (available_thread_idx_->Receive(&thread_idx)) {
-    std::thread& prefetcher = prefetchers_[thread_idx];
-    if (prefetcher.joinable()) {
-      prefetcher.join();
-    }
-    size_t reader_idx;
-    if (waiting_reader_idx_->Receive(&reader_idx)) {
-      // Still have files to read. Start a new prefetch thread.
-      prefetcher = std::thread([this, reader_idx, thread_idx] {
-        PrefetchThreadFunc(reader_idx, thread_idx);
-      });
-    } else {
-      // No more file to read.
-      ++completed_thread_num;
-      if (completed_thread_num == prefetchers_.size()) {
-        buffer_->Close();
-        break;
-      }
-    }
-  }
-  // If users invoke Shutdown() when scheduler is running, it will close the
-  // 'avaiable_thread_idx_' and prefecther threads have no way to tell scheduler
-  // to release their resource. So a check is needed before scheduler ends.
-  for (auto& p : prefetchers_) {
-    if (p.joinable()) {
-      p.join();
-    }
-  }
-  VLOG(5) << "MultiFileReader schedule thread terminates.";
-}
-
-void MultiFileReader::PrefetchThreadFunc(size_t reader_idx, size_t thread_idx) {
-  VLOG(5) << "The prefetch thread of file idx '" << reader_idx << "' starts.";
-  std::unique_ptr<framework::ReaderBase>& reader = readers_[reader_idx];
-  while (true) {
-    std::vector<framework::LoDTensor> ins;
-    reader->ReadNext(&ins);
-    if (ins.empty()) {
-      reader->Shutdown();
-      reader->Start();
-      break;
-    }
-    try {
-      buffer_->Send(std::move(ins));
-    } catch (paddle::platform::EnforceNotMet e) {
-      VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch "
-                 "thread of file idx '"
-              << reader_idx << "' will terminate.";
-      break;
-    }
-  }
-
-  if (!available_thread_idx_->Send(thread_idx)) {
-    VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. "
-               "Fail to send thread_idx.";
-  }
-  VLOG(5) << "The prefetch thread of file idx '" << reader_idx
-          << "' terminates.";
-}
+class IReaderContainer {
+ public:
+  virtual ~IReaderContainer() {}
+  virtual void AppendReader(
+      std::unique_ptr<framework::ReaderBase>&& readers) = 0;
+  virtual void Stop() = 0;
+  virtual void Start() = 0;
+  virtual void ReadNext(std::vector<framework::LoDTensor>* out) = 0;
+};
+
+class OrderedReaderContainer : public IReaderContainer {
+ public:
+  void AppendReader(std::unique_ptr<framework::ReaderBase>&& reader) override {
+    pending_.emplace(std::move(reader));
+  }
+
+  void Stop() override {
+    while (!pending_.empty()) {
+      MoveFrontPendingToDone();
+    }
+  }
+
+  void Start() override { std::swap(done_, pending_); }
+
+  void ReadNext(std::vector<framework::LoDTensor>* out) override {
+    if (!pending_.empty()) {
+      pending_.front()->ReadNext(out);
+      if (out->empty()) {
+        MoveFrontPendingToDone();
+        ReadNext(out);
+      }
+    } else {
+      out->clear();
+    }
+  }
+
+ private:
+  void MoveFrontPendingToDone() {
+    pending_.front()->Shutdown();
+    pending_.front()->Start();
+    done_.emplace(move(pending_.front()));
+    pending_.pop();
+  }
+
+  std::queue<std::unique_ptr<framework::ReaderBase>> pending_;
+  std::queue<std::unique_ptr<framework::ReaderBase>> done_;
+};
+
+class PreemptiveReaderContainer : public IReaderContainer {
+  using ReaderList = std::list<std::unique_ptr<framework::ReaderBase>>;
+
+  struct FutureItem {
+    std::vector<framework::LoDTensor> data_;
+    ReaderList::iterator reader_it_;
+  };
+
+  using FutureList = std::list<std::future<FutureItem>>;
+
+ public:
+  explicit PreemptiveReaderContainer(size_t thread_num) : pool_(thread_num) {}
+
+  void Stop() override {
+    if (!pending_.empty()) {
+      for (auto& reader : pending_) {
+        reader->Shutdown();
+      }
+      for (auto& fu : futures_) {
+        fu.wait();
+      }
+      futures_.clear();
+      for (auto& reader : pending_) {
+        reader->Start();
+        done_.emplace_back(std::move(reader));
+      }
+      pending_.clear();
+      bool timeout;
+      complete_queue_.PopAll(1000, &timeout);
+      PADDLE_ENFORCE(!timeout);
+    }
+  }
+
+  void Start() override {
+    for (auto& reader : done_) {
+      AppendReader(std::move(reader));
+    }
+    done_.clear();
+  }
+
+  void ReadNext(std::vector<framework::LoDTensor>* out) override {
+    if (!pending_.empty()) {
+      auto future_it = complete_queue_.Pop();
+      FutureItem item = future_it->get();
+      if (item.data_.empty()) {  // reader done.
+        done_.emplace_back(std::move(*item.reader_it_));
+        pending_.erase(item.reader_it_);
+        futures_.erase(future_it);
+        ReadNext(out);
+      } else {
+        *out = item.data_;
+        // continue read async
+        AsyncRead(item.reader_it_, &future_it);
+      }
+    } else {
+      out->clear();
+    }
+  }
+
+ private:
+  void AppendReader(std::unique_ptr<framework::ReaderBase>&& readers) override {
+    pending_.emplace_back(std::move(readers));
+    auto reader_it = pending_.end();
+    --reader_it;
+
+    futures_.emplace_back();
+    auto future_it = futures_.end();
+    --future_it;
+
+    AsyncRead(reader_it, &future_it);
+  }
+
+  void AsyncRead(const ReaderList::iterator& reader_it,
+                 FutureList::iterator* future_it_ptr) {
+    auto& future_it = *future_it_ptr;
+    *future_it = pool_.enqueue([reader_it, future_it, this] {
+      FutureItem item;
+      item.reader_it_ = reader_it;
+      (*reader_it)->ReadNext(&item.data_);
+      if (item.data_.empty()) {
+        (*reader_it)->Shutdown();
+        (*reader_it)->Start();
+      }
+      complete_queue_.Push(future_it);
+      return item;
+    });
+  }
+
+  FutureList futures_;
+  ThreadPool pool_;
+  framework::BlockingQueue<FutureList::iterator> complete_queue_;
+  std::list<std::unique_ptr<framework::ReaderBase>> pending_;
+  std::list<std::unique_ptr<framework::ReaderBase>> done_;
+};
+
+class MultiFileReader : public framework::ReaderBase {
+ public:
+  MultiFileReader(const std::vector<std::string>& file_names,
+                  std::unique_ptr<IReaderContainer>&& container)
+      : container_(std::move(container)) {
+    for (auto& fn : file_names) {
+      container_->AppendReader(CreateReaderByFileName(fn));
+    }
+  }
+
+  ~MultiFileReader() { container_->Stop(); }
+
+ protected:
+  void ReadNextImpl(std::vector<framework::LoDTensor>* out) override {
+    container_->ReadNext(out);
+  }
+
+  void ShutdownImpl() override { container_->Stop(); }
+
+  void StartImpl() override { container_->Start(); }
+
+ private:
+  std::unique_ptr<IReaderContainer> container_;
+};

 class OpenFilesOp : public framework::OperatorBase {
  public:

@@ -173,13 +205,22 @@ class OpenFilesOp : public framework::OperatorBase {
                       "shape concat's length.");
     const auto& file_names = Attr<std::vector<std::string>>("file_names");
     PADDLE_ENFORCE(!file_names.empty(), "No file to be read!");
-    const size_t thread_num = Attr<int>("thread_num");
-    const size_t buffer_size = Attr<int>("buffer_size");
+    bool is_test = Attr<bool>("is_test");

     auto* out = scope.FindVar(Output("Out"))
                     ->template GetMutable<framework::ReaderHolder>();
+    std::unique_ptr<IReaderContainer> container;
+
+    if (is_test) {
+      container.reset(new OrderedReaderContainer());
+    } else {
+      container.reset(new PreemptiveReaderContainer(
+          std::min(file_names.size(),
+                   static_cast<size_t>(std::thread::hardware_concurrency()))));
+    }
+
     out->Reset(
-        std::make_shared<MultiFileReader>(file_names, thread_num, buffer_size));
+        std::make_shared<MultiFileReader>(file_names, std::move(container)));
   }
 };

@@ -187,9 +228,7 @@ class OpenFilesOpMaker : public FileReaderMakerBase {
 protected:
  void Apply() override {
    AddAttr<std::vector<std::string>>("file_names", "Files to be read.");
-    AddAttr<int>("thread_num",
-                 "The maximal concurrent prefetch thread number.")
-        .GreaterThan(0);
-    AddAttr<int>("buffer_size", "The size of prefetch buffer.").GreaterThan(0);
+    AddAttr<bool>("is_test", "Used for testing data.").SetDefault(false);

    AddComment(R"DOC(
      OpenFiles Operator
paddle/fluid/pybind/pybind.cc
@@ -87,37 +87,37 @@ PYBIND11_PLUGIN(core) {
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-      .def("get_dims",
+      .def("_get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
-      .def("set_dims",
+      .def("_set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
-      .def("set_layout",
+      .def("_set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })

@@ -145,11 +145,11 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-      .def("set_float_element", TensorSetElement<float>)
-      .def("get_float_element", TensorGetElement<float>)
-      .def("set_double_element", TensorSetElement<double>)
-      .def("get_double_element", TensorGetElement<double>)
-      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });
+      .def("_set_float_element", TensorSetElement<float>)
+      .def("_get_float_element", TensorGetElement<float>)
+      .def("_set_double_element", TensorSetElement<double>)
+      .def("_get_double_element", TensorGetElement<double>)
+      .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });

   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
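The bindings above are what back the Tensor object exposed as paddle.fluid.core. A minimal sketch of how the renamed methods are reached from Python after this change (assuming a CPU build; the scope/variable plumbing mirrors the unit tests further down in this diff):

    import numpy as np
    import paddle.fluid.core as core

    scope = core.Scope()
    tensor = scope.var("x").get_tensor()

    # Low-level mutators are now underscore-prefixed (internal) methods.
    tensor._set_dims([2, 3])
    tensor._alloc_float(core.CPUPlace())
    tensor.set(np.random.random((2, 3)).astype("float32"), core.CPUPlace())

    print(tensor.shape())                # public binding, unchanged
    print(tensor._dtype())               # renamed from dtype()
    print(tensor._get_float_element(0))  # renamed from get_float_element()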
python/paddle/fluid/clip.py
@@ -31,7 +31,7 @@ class BaseErrorClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         raise NotImplementedError()

@@ -67,7 +67,7 @@ class ErrorClipByValue(BaseErrorClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         clip_op_desc = block.desc.append_op()
         clip_op_desc.set_type("clip")
         clip_op_desc.set_input("X", [grad_name])

@@ -90,17 +90,17 @@ def error_clip_callback(block, context):
                 "Variable's error_clip should be an instance of BaseErrorClipAttr or None."
             )
         if error_clip is not None:
-            error_clip.append_clip_op(block, grad_n)
+            error_clip._append_clip_op(block, grad_n)


 class BaseGradientClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         raise NotImplementedError()

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         raise NotImplementedError()

@@ -108,10 +108,10 @@ class NullGradientClipAttr(BaseGradientClipAttr):
     def __str__(self):
         return "Null"

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         return param, grad

@@ -153,10 +153,10 @@ class GradientClipByValue(BaseGradientClipAttr):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip(x=grad, min=self.min, max=self.max)
         return param, new_grad

@@ -199,10 +199,10 @@ class GradientClipByNorm(BaseGradientClipAttr):
     def __str__(self):
         return "ByNorm, clip_norm=%f" % self.clip_norm

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm)
         return param, new_grad

@@ -257,7 +257,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name,
                                                               self.clip_norm)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         if self.group_name not in context:
             context[self.group_name] = []
             context[self.group_name + "_clip_value"] = self.clip_norm

@@ -274,7 +274,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         self.context = context

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])

@@ -336,12 +336,12 @@ def append_gradient_clip_ops(param_grad):
                     "clip attribute should be an instance of BaseGradientClipAttr")

-            clip_attr.process_context(context=context, param=p, grad=g)
+            clip_attr._process_context(context=context, param=p, grad=g)

     res = []
     for p, g in param_grad:
         with p.block.program.optimized_guard(p):
-            res.append(clip_attr.create_operators(param=p, grad=g))
+            res.append(clip_attr._create_operators(param=p, grad=g))

     return res
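For code that defines its own clip attribute, the hooks to override are now the underscore-prefixed ones; framework code such as append_gradient_clip_ops calls them internally. A toy illustration (not part of the diff, and assuming fluid.layers.scale is available) might look like:

    from paddle.fluid import layers
    from paddle.fluid.clip import BaseGradientClipAttr


    class ScaleGradient(BaseGradientClipAttr):
        """Toy clip attribute that halves every gradient (illustrative only)."""

        def __str__(self):
            return "ScaleGradient"

        def _process_context(self, context, param, grad):
            # Nothing to accumulate across parameters for this toy rule.
            pass

        def _create_operators(self, param, grad):
            # Before this commit the hook was named create_operators().
            return param, layers.scale(x=grad, scale=0.5)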
python/paddle/fluid/layer_helper.py
@@ -68,11 +68,11 @@ class LayerHelper(object):
     @property
     def param_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('param_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('param_attr', None))

     @property
     def bias_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('bias_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('bias_attr', None))

     def multiple_param_attr(self, length):
         param_attr = self.param_attr

@@ -262,11 +262,11 @@ class LayerHelper(object):
             g_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=g_param_shape,
-                **g_param_attr.to_kwargs(with_initializer=False))
+                **g_param_attr._to_kwargs(with_initializer=False))
             v_param = self.startup_program.global_block().create_parameter(
                 dtype=dtype,
                 shape=v_param_shape,
-                **v_param_attr.to_kwargs(with_initializer=True))
+                **v_param_attr._to_kwargs(with_initializer=True))
             __norm_except_dim(
                 x=v_param,
                 out=g_param,

@@ -275,9 +275,9 @@ class LayerHelper(object):
             # Add weight normalization to main_program
             g_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs())
+                dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs())
             v_param = self.main_program.global_block().create_parameter(
-                dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs())
+                dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs())
             w_param = __weight_normalize(g_param, v_param, dim=attr.dim)
             return w_param

@@ -296,11 +296,11 @@ class LayerHelper(object):
         if default_initializer is None and attr.initializer is None:
             if is_bias:
-                attr.set_default_bias_initializer()
+                attr._set_default_bias_initializer()
             else:
-                attr.set_default_param_initializer()
+                attr._set_default_param_initializer()
         else:
-            attr.set_default_initializer(default_initializer)
+            attr._set_default_initializer(default_initializer)

         # If weight normalization is set, insert extra parameters and ops.
         # Refer to https://arxiv.org/pdf/1602.07868.pdf

@@ -310,9 +310,9 @@ class LayerHelper(object):
             return param
         self.startup_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True))
+            dtype=dtype, shape=shape, **attr._to_kwargs(with_initializer=True))
         return self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs())
+            dtype=dtype, shape=shape, **attr._to_kwargs())

     def get_parameter(self, name):
         param = self.main_program.global_block().var(name)
python/paddle/fluid/param_attr.py
@@ -67,7 +67,7 @@ class ParamAttr(object):
         self.gradient_clip = gradient_clip
         self.model_average = do_model_average

-    def set_default_initializer(self, initializer):
+    def _set_default_initializer(self, initializer):
         """
         Set the default initializer, the initializer should be Constant,
         Uniform, Normal, Xavier, MSRA.

@@ -88,7 +88,7 @@ class ParamAttr(object):
         self.initializer = initializer

-    def set_default_param_initializer(self):
+    def _set_default_param_initializer(self):
         """
         Set the default initializer for the parameter with Xavier.

@@ -98,9 +98,9 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Xavier())
+        self._set_default_initializer(Xavier())

-    def set_default_bias_initializer(self):
+    def _set_default_bias_initializer(self):
         """
         Set the default initializer for the bias with Constant(0.0).

@@ -110,10 +110,10 @@ class ParamAttr(object):
         Returns:
             None.
         """
-        self.set_default_initializer(Constant(0.0))
+        self._set_default_initializer(Constant(0.0))

     @staticmethod
-    def to_attr(arg):
+    def _to_attr(arg):
         """
         Create ParamAttr[s].

@@ -131,7 +131,7 @@ class ParamAttr(object):
         if arg is None:
             return ParamAttr()
         elif isinstance(arg, list) or isinstance(arg, tuple):
-            return [ParamAttr.to_attr(a) for a in arg]
+            return [ParamAttr._to_attr(a) for a in arg]
         elif isinstance(arg, ParamAttr):
             return arg
         elif isinstance(arg, str) or isinstance(arg, unicode):

@@ -141,11 +141,11 @@ class ParamAttr(object):
         elif isinstance(arg, WeightDecayRegularizer):
             return ParamAttr(regularizer=arg)
         elif isinstance(arg, bool):
-            return ParamAttr.to_attr(None) if arg else False
+            return ParamAttr._to_attr(None) if arg else False
         else:
             raise TypeError("{0} cast to ParamAttr".format(type(arg)))

-    def to_kwargs(self, with_initializer=False):
+    def _to_kwargs(self, with_initializer=False):
         """
         Returns the attributes of this parameter.
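User code keeps constructing ParamAttr through its public constructor; only the conversion helpers that LayerHelper calls are now internal. A short sketch (illustrative only, assuming the fluid layer API of this era):

    import paddle.fluid as fluid

    w_attr = fluid.ParamAttr(name="fc_w",
                             initializer=fluid.initializer.Xavier(),
                             regularizer=fluid.regularizer.L2Decay(1e-4))

    # Layers still accept a ParamAttr (or a plain name) exactly as before...
    x = fluid.layers.data(name="x", shape=[13], dtype="float32")
    y = fluid.layers.fc(input=x, size=1, param_attr=w_attr)

    # ...while the framework-side conversion is now an internal helper:
    attr = fluid.ParamAttr._to_attr(w_attr)           # was ParamAttr.to_attr(...)
    kwargs = attr._to_kwargs(with_initializer=True)   # was to_kwargs(...)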
python/paddle/fluid/regularizer.py
@@ -15,10 +15,7 @@
 import framework
 from . import core

-__all__ = [
-    'append_regularization_ops', 'L1Decay', 'L2Decay', 'L1DecayRegularizer',
-    'L2DecayRegularizer'
-]
+__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


 def append_regularization_ops(parameters_and_grads, regularization=None):
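One practical effect of trimming __all__ above (an illustration, not part of the diff): a star-import of fluid.regularizer no longer re-exports append_regularization_ops, while an explicit import keeps working for internal callers such as the optimizers:

    # Only the decay classes listed in __all__ are pulled in here.
    from paddle.fluid.regularizer import *        # L1Decay, L2Decay, ...

    # The helper is still reachable when named explicitly.
    from paddle.fluid.regularizer import append_regularization_ops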
python/paddle/fluid/tests/unittests/op_test.py
@@ -60,8 +60,8 @@ def get_numeric_gradient(place,
         return np.array(sum).mean()

     tensor_to_check = scope.find_var(input_to_check).get_tensor()
-    tensor_size = product(tensor_to_check.get_dims())
-    tensor_to_check_dtype = tensor_to_check.dtype()
+    tensor_size = product(tensor_to_check.shape())
+    tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
     elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:

@@ -74,15 +74,15 @@ def get_numeric_gradient(place,
     def __get_elem__(tensor, i):
         if tensor_to_check_dtype == np.float32:
-            return tensor.get_float_element(i)
+            return tensor._get_float_element(i)
         else:
-            return tensor.get_double_element(i)
+            return tensor._get_double_element(i)

     def __set_elem__(tensor, i, e):
         if tensor_to_check_dtype == np.float32:
-            tensor.set_float_element(i, e)
+            tensor._set_float_element(i, e)
         else:
-            tensor.set_double_element(i, e)
+            tensor._set_double_element(i, e)

     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.

@@ -107,7 +107,7 @@ def get_numeric_gradient(place,
         __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2

-    return gradient_flat.reshape(tensor_to_check.get_dims())
+    return gradient_flat.reshape(tensor_to_check.shape())


 class OpTest(unittest.TestCase):

@@ -125,7 +125,7 @@ class OpTest(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
-        '''Restore random seeds'''
+        """Restore random seeds"""
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)
python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -129,7 +129,6 @@ def create_or_get_tensor(scope, var_name, var, place):
     if var is not None:
         assert isinstance(var, np.ndarray)
         tensor.set_recursive_sequence_lengths([])
-        tensor.set_dims(var.shape)
         tensor.set(var, place)
     return tensor
python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py
@@ -65,10 +65,10 @@ class TestDyRnnStaticInput(unittest.TestCase):
         return self._lodtensor_to_ndarray(fetch_outs[0])

     def _lodtensor_to_ndarray(self, lod_tensor):
-        dims = lod_tensor.get_dims()
+        dims = lod_tensor.shape()
         ndarray = np.zeros(shape=dims).astype('float32')
         for i in xrange(np.product(dims)):
-            ndarray.ravel()[i] = lod_tensor.get_float_element(i)
+            ndarray.ravel()[i] = lod_tensor._get_float_element(i)
         return ndarray, lod_tensor.recursive_sequence_lengths()

     def build_graph(self, only_forward=False):

@@ -185,19 +185,19 @@ class TestDyRnnStaticInput(unittest.TestCase):
         actual_gradients, actual_lod = self.fetch_value(static_input_grad)

-        static_input_shape = self.static_input_tensor.get_dims()
+        static_input_shape = self.static_input_tensor.shape()
         numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
         # calculate numeric gradients
         tensor_size = np.product(static_input_shape)
         for i in xrange(tensor_size):
-            origin = self.static_input_tensor.get_float_element(i)
+            origin = self.static_input_tensor._get_float_element(i)
             x_pos = origin + self._delta
-            self.static_input_tensor.set_float_element(i, x_pos)
+            self.static_input_tensor._set_float_element(i, x_pos)
             y_pos = self.fetch_value(loss)[0][0]
             x_neg = origin - self._delta
-            self.static_input_tensor.set_float_element(i, x_neg)
+            self.static_input_tensor._set_float_element(i, x_neg)
             y_neg = self.fetch_value(loss)[0][0]
-            self.static_input_tensor.set_float_element(i, origin)
+            self.static_input_tensor._set_float_element(i, origin)
             numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
         self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
         self.assertTrue(
python/paddle/fluid/tests/unittests/test_selected_rows.py
@@ -40,12 +40,12 @@ class TestSelectedRows(unittest.TestCase):
         # compare tensor
         self.assertAlmostEqual(2.0,
-                               selected_rows.get_tensor().get_float_element(0))
+                               selected_rows.get_tensor()._get_float_element(0))
         self.assertAlmostEqual(1.0,
-                               selected_rows.get_tensor().get_float_element(1))
+                               selected_rows.get_tensor()._get_float_element(1))
         self.assertAlmostEqual(
             4.0,
-            selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
+            selected_rows.get_tensor()._get_float_element(2 * row_numel + 8))


 if __name__ == "__main__":
python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
@@ -45,8 +45,8 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
     def sum_lodtensor(self, tensor):
         sum_res = 0.0
-        for i in xrange(np.product(tensor.get_dims())):
-            sum_res += tensor.get_float_element(i)
+        for i in xrange(np.product(tensor.shape())):
+            sum_res += tensor._get_float_element(i)
         return sum_res
python/paddle/fluid/tests/unittests/test_tensor.py
@@ -25,8 +25,8 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()

-        tensor.set_dims([1000, 784])
-        tensor.alloc_int(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_int(place)

         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)
         tensor_array[3, 9] = 1

@@ -44,8 +44,8 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()

-        tensor.set_dims([1000, 784])
-        tensor.alloc_float(place)
+        tensor._set_dims([1000, 784])
+        tensor._alloc_float(place)

         tensor_array = numpy.array(tensor)
         self.assertEqual((1000, 784), tensor_array.shape)

@@ -63,8 +63,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()

-        lod_tensor.set_dims([4, 4, 6])
-        lod_tensor.alloc_int(place)
+        lod_tensor._set_dims([4, 4, 6])
+        lod_tensor._alloc_int(place)

         array = numpy.array(lod_tensor)
         array[0, 0, 0] = 3
         array[3, 3, 5] = 10

@@ -84,8 +84,8 @@ class TestTensor(unittest.TestCase):
         var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
-        lod_tensor.alloc_float(place)
+        lod_tensor._set_dims([5, 2, 3, 4])
+        lod_tensor._alloc_float(place)

         tensor_array = numpy.array(lod_tensor)
         self.assertEqual((5, 2, 3, 4), tensor_array.shape)

@@ -104,14 +104,13 @@ class TestTensor(unittest.TestCase):
         self.assertListEqual(lod_py, lod)

     def test_lod_tensor_init(self):
-        scope = core.Scope()
         place = core.CPUPlace()
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0

@@ -129,9 +128,9 @@ class TestTensor(unittest.TestCase):
         lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

-        lod_tensor.set_dims([5, 2, 3, 4])
+        lod_tensor._set_dims([5, 2, 3, 4])
         lod_tensor.set_recursive_sequence_lengths(lod_py)
-        lod_tensor.alloc_float(place)
+        lod_tensor._alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
         tensor_array[0, 0, 0, 1] = 2.0

@@ -149,15 +148,15 @@ class TestTensor(unittest.TestCase):
         tensor = var.get_tensor()

-        tensor.set_dims([0, 1])
-        tensor.alloc_float(place)
+        tensor._set_dims([0, 1])
+        tensor._alloc_float(place)

         tensor_array = numpy.array(tensor)
         self.assertEqual((0, 1), tensor_array.shape)

         if core.is_compiled_with_cuda():
             gpu_place = core.CUDAPlace(0)
-            tensor.alloc_float(gpu_place)
+            tensor._alloc_float(gpu_place)
             tensor_array = numpy.array(tensor)
             self.assertEqual((0, 1), tensor_array.shape)
python/paddle/fluid/tests/unittests/testsuite.py
@@ -75,7 +75,7 @@ def set_input(scope, op, inputs, place):
             if isinstance(var, tuple):
                 tensor.set_recursive_sequence_lengths(var[1])
                 var = var[0]
-            tensor.set_dims(var.shape)
+            tensor._set_dims(var.shape)
             tensor.set(var, place)
         elif isinstance(var, float):
             scope.find_var(var_name).set_float(var)