s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 12e9c003
Authored Jan 23, 2017 by qiaolongfei

add optimizer

Parent: a3f0aed0
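In short: this commit introduces a first paddle.v2.optimizer module, consisting of an Optimizer base class that hides the v1 settings() call and the swig-level OptimizationConfig, plus Adam and Adamax subclasses. The MNIST API demo is rewired to use it, and python/paddle/v2/__init__.py is updated to export the new module.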
Showing 3 changed files with 77 additions and 25 deletions.
demo/mnist/api_train.py        +15  -25
python/paddle/v2/__init__.py    +4   -0
python/paddle/v2/optimizer.py  +58   -0
demo/mnist/api_train.py

@@ -13,15 +13,7 @@ import numpy as np
 import random
 from mnist_util import read_from_mnist
 from paddle.trainer_config_helpers import *
+import paddle.v2
 
 
-def optimizer_config():
-    settings(
-        learning_rate=1e-4,
-        learning_method=AdamOptimizer(),
-        batch_size=1000,
-        model_average=ModelAverage(average_window=0.5),
-        regularization=L2Regularization(rate=0.5))
-
-
 def network_config():
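The removed optimizer_config function does not simply disappear: its settings(...) arguments reappear in the next hunk as keyword arguments to the new paddle.v2.optimizer.Adam class.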
@@ -75,19 +67,23 @@ def input_order_converter(generator):
 def main():
     api.initPaddle("-use_gpu=false", "-trainer_count=4")  # use 4 cpu cores
 
-    # get enable_types for each optimizer.
-    # enable_types = [value, gradient, momentum, etc]
-    # For each optimizer(SGD, Adam), GradientMachine should enable different
-    # buffers.
-    opt_config_proto = parse_optimizer_config(optimizer_config)
-    opt_config = api.OptimizationConfig.createFromProto(opt_config_proto)
-    _temp_optimizer_ = api.ParameterOptimizer.create(opt_config)
-    enable_types = _temp_optimizer_.getParameterTypes()
+    optimizer = paddle.v2.optimizer.Adam(
+        learning_rate=1e-4,
+        batch_size=1000,
+        model_average=ModelAverage(average_window=0.5),
+        regularization=L2Regularization(rate=0.5))
+
+    # Create Local Updater. Local means not run in cluster.
+    # For a cluster training, here we can change to createRemoteUpdater
+    # in future.
+    updater = optimizer.create_local_updater()
+    assert isinstance(updater, api.ParameterUpdater)
 
     # Create Simple Gradient Machine.
     model_config = parse_network_config(network_config)
-    m = api.GradientMachine.createFromConfigProto(
-        model_config, api.CREATE_MODE_NORMAL, enable_types)
+    m = api.GradientMachine.createFromConfigProto(model_config,
+                                                  api.CREATE_MODE_NORMAL,
+                                                  optimizer.enable_types())
 
     # This type check is not useful. Only enable type hint in IDE.
     # Such as PyCharm
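What was an explicit four-step dance on the old side (parse_optimizer_config, OptimizationConfig.createFromProto, ParameterOptimizer.create, getParameterTypes) is now hidden behind optimizer.enable_types(), and the local updater comes from optimizer.create_local_updater() instead of being built from a loose opt_config.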
@@ -96,12 +92,6 @@ def main():
     # Initialize Parameter by numpy.
     init_parameter(network=m)
 
-    # Create Local Updater. Local means not run in cluster.
-    # For a cluster training, here we can change to createRemoteUpdater
-    # in future.
-    updater = api.ParameterUpdater.createLocalUpdater(opt_config)
-    assert isinstance(updater, api.ParameterUpdater)
-
     # Initialize ParameterUpdater.
     updater.init(m)
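Taken together, the new side of this file's diff reduces optimizer setup to calls on one optimizer object. A minimal sketch of the resulting flow, assuming the demo's surrounding pieces that sit outside this diff (the api alias for py_paddle.swig_paddle, the network_config function):

    import py_paddle.swig_paddle as api          # the demo imports this as `api`
    from paddle.trainer_config_helpers import *  # parse_network_config, etc.
    import paddle.v2

    api.initPaddle("-use_gpu=false", "-trainer_count=4")

    optimizer = paddle.v2.optimizer.Adam(learning_rate=1e-4, batch_size=1000)

    # The optimizer now owns the OptimizationConfig, so the gradient
    # machine can ask it directly which parameter buffers to enable.
    m = api.GradientMachine.createFromConfigProto(
        parse_network_config(network_config), api.CREATE_MODE_NORMAL,
        optimizer.enable_types())

    # Local (single-process) updater; create_remote_updater(pass_num)
    # is the cluster-bound variant mentioned in the diff's comments.
    updater = optimizer.create_local_updater()
    updater.init(m)

Note that batch_size is accepted but discarded by Optimizer.__init__ (the module stubs it to 1 internally), while v1-style keywords such as model_average and regularization still flow through **kwargs into the v1 settings() call.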
python/paddle/v2/__init__.py

@@ -11,3 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import optimizer
+
+__all__ = ['optimizer']
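The bare import optimizer is a Python 2 implicit relative import (the new module still uses a print statement), so after this change the module is importable as paddle.v2.optimizer, which is exactly how the demo refers to it.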
python/paddle/v2/optimizer.py (new file, mode 100644)

import py_paddle.swig_paddle as swig_api
import paddle.trainer_config_helpers.optimizers as v1_optimizers
import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
import paddle.v2

__all__ = ['Adam', 'Adamax']


class Optimizer(object):
    def __init__(self, **kwargs):
        if 'batch_size' in kwargs:
            del kwargs['batch_size']  # not important for python library.

        def __impl__():
            v1_optimizers.settings(batch_size=1, **kwargs)

        self.__opt_conf_proto__ = config_parser_utils.parse_optimizer_config(
            __impl__)
        self.__opt_conf__ = swig_api.OptimizationConfig.createFromProto(
            self.__opt_conf_proto__)

    def enable_types(self):
        """
        get enable_types for each optimizer.
        enable_types = [value, gradient, momentum, etc]
        For each optimizer(SGD, Adam), GradientMachine should enable different
        buffers.
        """
        tmp = swig_api.ParameterOptimizer.create(self.__opt_conf__)
        assert isinstance(tmp, swig_api.ParameterOptimizer)
        return tmp.getParameterTypes()

    def create_local_updater(self):
        return swig_api.ParameterUpdater.createLocalUpdater(self.__opt_conf__)

    def create_remote_updater(self, pass_num):
        return swig_api.ParameterUpdater.createRemoteUpdater(
            self.__opt_conf__, pass_num)


class Adam(Optimizer):
    def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs):
        learning_method = v1_optimizers.AdamOptimizer(
            beta1=beta1, beta2=beta2, epsilon=epsilon)
        super(Adam, self).__init__(learning_method=learning_method, **kwargs)


class Adamax(Optimizer):
    def __init__(self, beta1=0.9, beta2=0.999, **kwargs):
        learning_method = v1_optimizers.AdamaxOptimizer(
            beta1=beta1, beta2=beta2)
        super(Adamax, self).__init__(learning_method=learning_method, **kwargs)


if __name__ == '__main__':
    swig_api.initPaddle('--use_gpu=false')
    opt = paddle.v2.optimizer.Adam()
    print opt.enable_types()
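The two concrete classes differ only in which v1 learning_method they construct, so the same pattern extends to the other v1 optimizers. A hypothetical sketch, not part of this commit, assuming an AdaGradOptimizer exists in the v1 config helpers:

    class AdaGrad(Optimizer):
        # Hypothetical subclass following the Adam/Adamax pattern above;
        # v1_optimizers.AdaGradOptimizer is assumed, not shown in this diff.
        def __init__(self, **kwargs):
            learning_method = v1_optimizers.AdaGradOptimizer()
            super(AdaGrad, self).__init__(
                learning_method=learning_method, **kwargs)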
登录