BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 154e1d04

Authored on Oct 23, 2017 by 武毅; committed by GitHub on Oct 23, 2017

Merge pull request #4972 from typhoonzero/fix_v2_optimizer_order

Fix v2 optimizer define order

Parents: 48173e85, 6942eb2c

Showing 5 changed files with 128 additions and 0 deletions (+128 / -0)
python/paddle/v2/parameters.py                  +4   -0
python/paddle/v2/tests/CMakeLists.txt           +1   -0
python/paddle/v2/tests/test_paramconf_order.py  +85  -0
python/paddle/v2/topology.py                    +30  -0
python/paddle/v2/trainer.py                     +8   -0
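Before the per-file diffs, here is a minimal sketch (not part of the commit) of the ordering problem this merge addresses, built from the same paddle.v2 calls used in the new test; the data layers, sizes, and variable names below are made up for illustration:

import paddle.v2 as paddle

paddle.init()

# Layers are defined first, so their ParameterConfig protos are created
# before any optimizer-level defaults (such as a global L2 decay rate) exist.
x = paddle.layer.data(name="x", type=paddle.data_type.dense_vector(8))
label = paddle.layer.data(name="label", type=paddle.data_type.integer_value(2))
pred = paddle.layer.fc(input=x, size=2, act=paddle.activation.Softmax())
cost = paddle.layer.classification_cost(input=pred, label=label)
parameters = paddle.parameters.create(cost)

# The optimizer is defined only afterwards; its L2Regularization is what
# populates defaults like cp.g_default_decay_rate used by
# Topology.update_from_default() below.
optimizer = paddle.optimizer.AdaGrad(
    learning_rate=3e-3,
    regularization=paddle.optimizer.L2Regularization(rate=8e-4))

# Before this change, the parameters created above never picked up that
# default. SGD.__init__ now calls topology.update_from_default() and
# parameters.update_param_conf() to back-fill it (see trainer.py below).
trainer = paddle.trainer.SGD(cost, parameters, optimizer)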
python/paddle/v2/parameters.py

@@ -101,6 +101,10 @@ class Parameters(object):
         self.__param_conf__[param_conf.name] = param_conf
 
+    def update_param_conf(self, model_config):
+        for p in model_config.parameters:
+            self.__param_conf__[p.name] = p
+
     def keys(self):
         """
         keys are the names of each parameter.
...
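A rough sketch of how the new method is intended to be called (this mirrors the trainer.py change further below; `topology` and `parameters` stand in for a paddle.v2 Topology and Parameters instance):

# Sketch only: re-apply optimizer-time defaults to the model proto, then
# refresh the ParameterConfig objects cached inside Parameters.
topology.update_from_default()
parameters.update_param_conf(topology.proto())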
python/paddle/v2/tests/CMakeLists.txt

@@ -5,3 +5,4 @@ py_test(test_topology SRCS test_topology.py)
 py_test(test_rnn_layer SRCS test_rnn_layer.py)
 py_test(test_parameters SRCS test_parameters.py)
 py_test(test_data_feeder SRCS test_data_feeder.py)
+py_test(test_paramconf_order SRCS test_paramconf_order.py)
python/paddle/v2/tests/test_paramconf_order.py (new file, mode 100644)

# Copyright PaddlePaddle contributors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
import paddle.v2 as paddle


def wordemb(inlayer):
    wordemb = paddle.layer.table_projection(
        input=inlayer,
        size=5,
        param_attr=paddle.attr.Param(
            name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0))
    return wordemb


def train():
    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)
    # Every layer takes integer value of range [0, dict_size)
    firstword = paddle.layer.data(
        name="firstw", type=paddle.data_type.integer_value(dict_size))
    secondword = paddle.layer.data(
        name="secondw", type=paddle.data_type.integer_value(dict_size))
    thirdword = paddle.layer.data(
        name="thirdw", type=paddle.data_type.integer_value(dict_size))
    fourthword = paddle.layer.data(
        name="fourthw", type=paddle.data_type.integer_value(dict_size))
    nextword = paddle.layer.data(
        name="fifthw", type=paddle.data_type.integer_value(dict_size))

    Efirst = wordemb(firstword)
    Esecond = wordemb(secondword)
    Ethird = wordemb(thirdword)
    Efourth = wordemb(fourthword)

    contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
    hidden1 = paddle.layer.fc(
        name="fc1",
        input=contextemb,
        size=128,
        act=paddle.activation.Sigmoid(),
        layer_attr=paddle.attr.Extra(drop_rate=0.5),
        bias_attr=paddle.attr.Param(learning_rate=2),
        param_attr=paddle.attr.Param(
            initial_std=1. / math.sqrt(5 * 8),
            learning_rate=1,
            l2_rate=6e-4))
    predictword = paddle.layer.fc(
        input=hidden1,
        size=dict_size,
        bias_attr=paddle.attr.Param(learning_rate=2),
        act=paddle.activation.Softmax())
    return paddle.layer.classification_cost(input=predictword, label=nextword)


class TestParamConfOrder(unittest.TestCase):
    def test_param_conf_order(self):
        paddle.init()
        cost = train()
        parameters = paddle.parameters.create(cost)
        adagrad = paddle.optimizer.AdaGrad(
            learning_rate=3e-3,
            regularization=paddle.optimizer.L2Regularization(rate=8e-4))

        trainer = paddle.trainer.SGD(cost, parameters, adagrad)
        for p in trainer.get_topology_proto().parameters:
            if p.name == "_fc1.w0":
                self.assertEqual(p.decay_rate, 6e-4)
            else:
                self.assertEqual(p.decay_rate, 8e-4)


if __name__ == '__main__':
    unittest.main()
python/paddle/v2/topology.py

@@ -19,6 +19,7 @@ import paddle.trainer_config_helpers as conf_helps
 import layer as v2_layer
 import config_base
 import cPickle
+from paddle.trainer import config_parser as cp
 
 __all__ = ['Topology']
...
@@ -50,6 +51,35 @@ class Topology(object):
         assert isinstance(self.__model_config__, ModelConfig)
 
+    def update_from_default(self):
+        # HACK(typhoonzero): update ParameterConfig(proto) in case of
+        # optimizers are defined after layers, or between layers.
+        # Must be called from trainer.__init__()
+        for parameter in self.__model_config__.parameters:
+            if parameter.momentum == 0.0 and cp.g_default_momentum:
+                parameter.momentum = cp.g_default_momentum
+            if parameter.decay_rate == 0.0 and cp.g_default_decay_rate:
+                parameter.decay_rate = cp.g_default_decay_rate
+            if parameter.initial_mean == 0.0:
+                parameter.initial_mean = cp.g_default_initial_mean
+            if parameter.initial_std == 0.01:
+                parameter.initial_std = cp.g_default_initial_std
+            if parameter.initial_strategy == 0:
+                parameter.initial_strategy = cp.g_default_initial_strategy
+            if parameter.initial_smart == False:
+                parameter.initial_smart = cp.g_default_initial_smart
+            if parameter.num_batches_regularization == 1 and \
+                    cp.g_default_num_batches_regularization:
+                parameter.num_batches_regularization = \
+                    cp.g_default_num_batches_regularization
+            if parameter.gradient_clipping_threshold == 0.0 and \
+                    cp.g_default_gradient_clipping_threshold:
+                parameter.gradient_clipping_threshold = \
+                    cp.g_default_gradient_clipping_threshold
+            if parameter.device == -1 and cp.g_default_device:
+                parameter.device = cp.g_default_device
+            # FIXME(typhoonzero): ignored: update_hooks, g_default_compact_func
+
     def use_sparse_updater(self):
         """
         check if any parameter require to use sparse_update
...
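To connect update_from_default() to the new test: constructing the AdaGrad optimizer with L2Regularization(rate=8e-4) is what fills cp.g_default_decay_rate, so the expected effect looks roughly like the sketch below (assuming `cost` is the network built by train() in test_paramconf_order.py and that optimizer has already been defined):

from paddle.v2.topology import Topology

topology = Topology(cost)           # parameter protos were built before the optimizer
topology.update_from_default()      # back-fill cp.g_default_* onto those protos
for p in topology.proto().parameters:
    # "_fc1.w0" set its own l2_rate=6e-4 and keeps it; every other parameter
    # inherits the optimizer-level default of 8e-4, matching the test's asserts.
    expected = 6e-4 if p.name == "_fc1.w0" else 8e-4
    assert p.decay_rate == expected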
python/paddle/v2/trainer.py

@@ -64,6 +64,11 @@ class SGD(object):
                             "paddle.v2.optimizer.Optimizer")
         import py_paddle.swig_paddle as api
         topology = Topology(cost, extra_layers=extra_layers)
+        # HACK(typhoonzero): update ParameterConfig(proto) in case of optimizers
+        # are defined after layers, or between layers.
+        topology.update_from_default()
+        parameters.update_param_conf(topology.proto())
+
         self.__optimizer__ = update_equation
         self.__topology__ = topology
         self.__parameters__ = parameters
...
@@ -91,6 +96,9 @@ class SGD(object):
         self.__parameters__.append_gradient_machine(gm)
         self.__parameter_updater__ = None
 
+    def get_topology_proto(self):
+        return self.__topology_in_proto__
+
     def __use_remote_sparse_updater__(self):
         return self.__use_sparse_updater__ and not self.__is_local__
...