PaddlePaddle / PaddleHub — commit 4dc27439

Authored Sep 16, 2019 by zhangxuefei

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

Parents: 33c08f20, 6a386bb3
Showing 10 changed files with 276 additions and 113 deletions (+276 −113).
README.md                                +2    −2
paddlehub/autofinetune/autoft.py       +216   −43
paddlehub/autofinetune/evaluator.py      +4    −4
paddlehub/commands/autofinetune.py      +32   −17
paddlehub/commands/config.py            +10   −31
paddlehub/common/hub_server.py           +0    −5
paddlehub/common/logger.py               +6    −5
paddlehub/finetune/strategy.py           +1    −1
paddlehub/finetune/task/basic_task.py    +4    −4
tutorial/autofinetune.ipynb              +1    −1
README.md

````diff
@@ -60,8 +60,8 @@ $ hub run lac --input_text "今天是个好日子"
 使用[情感分析](http://www.paddlepaddle.org.cn/hub?filter=category&value=SentimentAnalysis)模型Senta对句子进行情感预测
 ```shell
-$ hub run senta_bilstm --input_text "今天是个好日子"
-[{'text': '今天是个好日子', 'sentiment_label': 2, 'sentiment_key': 'positive', 'positive_probs': 0.6065, 'negative_probs': 0.3935}]
+$ hub run senta_bilstm --input_text "今天天气真好"
+[{'text': '今天天气真好', 'sentiment_label': 1, 'sentiment_key': 'positive', 'positive_probs': 0.9798, 'negative_probs': 0.0202}]
 ```
 `示例三`
````
paddlehub/autofinetune/autoft.py

```diff
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from multiprocessing.pool import ThreadPool
+import cma
 import copy
 import json
 import math
@@ -20,8 +21,10 @@ import numpy as np
 import six
 import time
+from tb_paddle import SummaryWriter
 from paddlehub.common.logger import logger
 from paddlehub.common.utils import mkdir
+from paddlehub.autofinetune.evaluator import REWARD_SUM
 
 if six.PY3:
     INF = math.inf
@@ -29,38 +32,30 @@ else:
     INF = float("inf")
 
-class PSHE2(object):
+class BaseTuningStrategy(object):
     def __init__(
             self,
             evaluator,
             cudas=["0"],
             popsize=5,
             output_dir=None,
-            alpha=0.5,
-            epsilon=0.2,
     ):
         self._num_thread = len(cudas)
         self._popsize = popsize
-        self._alpha = alpha
-        self._epsilon = epsilon
-        self._iteration = 0
         self.cudas = cudas
         self.is_cuda_free = {"free": [], "busy": []}
         self.is_cuda_free["free"] = cudas
+        self._round = 0
         self.evaluator = evaluator
         self.init_input = evaluator.get_init_params()
-        self.num_hparm = len(self.init_input)
-        self.best_hparms_all_pop = []
-        self.best_hparams_per_pop = [[0] * self.num_hparm] * self._popsize
-        self.best_reward_per_pop = [INF] * self._popsize
-        self.momentums = [[0] * self.num_hparm] * self._popsize
+        self.num_hparam = len(self.init_input)
+        self.best_hparams_all_pop = []
         self.best_reward_all_pop = INF
-        self.current_hparams = [[0] * self.num_hparm] * self._popsize
-        for i in range(self.popsize):
-            self.current_hparams[i] = self.randomSolution()
+        self.current_hparams = [[0] * self.num_hparam] * self._popsize
+        self.hparams_name_list = [
+            param["name"] for param in evaluator.params['param_list']
+        ]
         if output_dir is None:
             now = int(time.time())
@@ -68,6 +63,7 @@ class PSHE2(object):
             self._output_dir = "output_" + time_str
         else:
             self._output_dir = output_dir
+        self.writer = SummaryWriter(logdir=self._output_dir + '/tb_paddle')
 
     @property
     def thread(self):
@@ -77,14 +73,6 @@ class PSHE2(object):
     def popsize(self):
         return self._popsize
 
-    @property
-    def alpha(self):
-        return self._alpha
-
-    @property
-    def epsilon(self):
-        return self._epsilon
-
     @property
     def output_dir(self):
         return self._output_dir
@@ -93,6 +81,10 @@ class PSHE2(object):
     def iteration(self):
         return self._iteration
 
+    @property
+    def round(self):
+        return self._round
+
     def set_output_dir(self, output_dir=None):
         if output_dir is not None:
             output_dir = output_dir
@@ -154,31 +146,20 @@ class PSHE2(object):
     def is_stop(self):
         return False
 
-    def solutions(self):
+    def get_current_hparams(self):
         return self.current_hparams
 
     def feedback(self, params_list, reward_list):
-        self._iteration = self._iteration + 1
-        for i in range(self.popsize):
-            if reward_list[i] < self.best_reward_per_pop[i]:
-                self.best_hparams_per_pop[i] = copy.deepcopy(
-                    self.current_hparams[i])
-                self.best_reward_per_pop[i] = reward_list[i]
-            if reward_list[i] < self.best_reward_all_pop:
-                self.best_hparms_all_pop = self.current_hparams[i]
-                self.best_reward_all_pop = reward_list[i]
-        self.estimateMomemtum()
-        for i in range(self.popsize):
-            for j in range(len(self.init_input)):
-                self.current_hparams[i][j] = self.current_hparams[i][j] + self.alpha * self.momentums[i][j]
-        self.smallPeturb()
+        return NotImplementedError
 
-    def optimal_solution(self):
-        return self.best_hparms_all_pop
+    def get_best_hparams(self):
+        return self.best_hparams_all_pop
+
+    def get_best_eval_value(self):
+        return REWARD_SUM - self.best_reward_all_pop
 
     def step(self, output_dir):
-        solutions = self.solutions()
+        solutions = self.get_current_hparams()
         params_cudas_dirs = []
         solution_results = []
@@ -209,3 +190,195 @@ class PSHE2(object):
         self.feedback(solutions, solution_results)
 
         return solutions_ckptdirs
+
+
+class HAZero(BaseTuningStrategy):
+    def __init__(
+            self,
+            evaluator,
+            cudas=["0"],
+            popsize=1,
+            output_dir=None,
+            sigma=0.2,
+    ):
+        super(HAZero, self).__init__(evaluator, cudas, popsize, output_dir)
+
+        self._sigma = sigma
+
+        self.evolution_stratefy = cma.CMAEvolutionStrategy(
+            self.init_input, sigma, {
+                'popsize': self.popsize,
+                'bounds': [-1, 1],
+                'AdaptSigma': True,
+                'verb_disp': 1,
+                'verb_time': 'True',
+            })
+
+    @property
+    def sigma(self):
+        return self._sigma
+
+    def get_current_hparams(self):
+        return self.evolution_stratefy.ask()
+
+    def is_stop(self):
+        return self.evolution_stratefy.stop()
+
+    def feedback(self, params_list, reward_list):
+        self._round = self._round + 1
+
+        local_min_reward = min(reward_list)
+        local_min_reward_index = reward_list.index(local_min_reward)
+        local_hparams = self.evaluator.convert_params(
+            params_list[local_min_reward_index])
+        print("The local best eval value in the %s-th round is %s." %
+              (self._round - 1, REWARD_SUM - local_min_reward))
+        print("The local best hyperparameters are as:")
+        for index, hparam_name in enumerate(self.hparams_name_list):
+            print("%s=%s" % (hparam_name, local_hparams[index]))
+
+        for i in range(self.popsize):
+            if reward_list[i] < self.best_reward_all_pop:
+                self.best_hparams_all_pop = self.current_hparams[i]
+                self.best_reward_all_pop = reward_list[i]
+
+        best_hparams = self.evaluator.convert_params(self.best_hparams_all_pop)
+        for index, name in enumerate(self.hparams_name_list):
+            self.writer.add_scalar(
+                tag="hyperparameter tuning/" + name,
+                scalar_value=best_hparams[index],
+                global_step=self.round)
+        self.writer.add_scalar(
+            tag="hyperparameter tuning/best_eval_value",
+            scalar_value=self.get_best_eval_value(),
+            global_step=self.round)
+
+        self.evolution_stratefy.tell(params_list, reward_list)
+        self.evolution_stratefy.disp()
+
+    def get_best_hparams(self):
+        return list(self.evolution_stratefy.result.xbest)
+
+
+class PSHE2(BaseTuningStrategy):
+    def __init__(
+            self,
+            evaluator,
+            cudas=["0"],
+            popsize=1,
+            output_dir=None,
+            alpha=0.5,
+            epsilon=0.2,
+    ):
+        super(PSHE2, self).__init__(evaluator, cudas, popsize, output_dir)
+
+        self._alpha = alpha
+        self._epsilon = epsilon
+
+        self.best_hparams_per_pop = [[0] * self.num_hparam] * self._popsize
+        self.best_reward_per_pop = [INF] * self._popsize
+        self.momentums = [[0] * self.num_hparam] * self._popsize
+
+        for i in range(self.popsize):
+            self.current_hparams[i] = self.set_random_hparam()
+
+    @property
+    def alpha(self):
+        return self._alpha
+
+    @property
+    def epsilon(self):
+        return self._epsilon
+
+    def set_random_hparam(self):
+        solut = [0] * self.num_hparam
+        for i in range(self.num_hparam):
+            ratio = (np.random.random_sample() - 0.5) * 2.0
+            if ratio >= 0:
+                solut[i] = (1.0 - self.init_input[i]) * ratio + self.init_input[i]
+            else:
+                solut[i] = (self.init_input[i] + 1.0) * ratio + self.init_input[i]
+        return solut
+
+    def small_peturb(self):
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                ratio = (np.random.random_sample() - 0.5) * 2.0
+                if ratio >= 0:
+                    self.current_hparams[i][j] = (
+                        1.0 - self.current_hparams[i][j]
+                    ) * ratio * self.epsilon + self.current_hparams[i][j]
+                else:
+                    self.current_hparams[i][j] = (
+                        self.current_hparams[i][j] + 1.0
+                    ) * ratio * self.epsilon + self.current_hparams[i][j]
+
+    def estimate_popgradients(self):
+        gradients = [[0] * self.num_hparam] * self.popsize
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                gradients[i][j] = self.current_hparams[i][j] - \
+                    self.best_hparams_all_pop[j]
+        return gradients
+
+    def estimate_local_gradients(self):
+        gradients = [[0] * self.num_hparam] * self.popsize
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                gradients[i][j] = self.current_hparams[i][j] - \
+                    self.best_hparams_per_pop[i][j]
+        return gradients
+
+    def estimate_momemtum(self):
+        popGrads = self.estimate_popgradients()
+        localGrads = self.estimate_local_gradients()
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                self.momentums[i][j] = (
+                    1 - 3.0 * self.alpha / self.round
+                ) * self.momentums[i][j] - self.alpha * localGrads[i][j] - \
+                    self.alpha * popGrads[i][j]
+
+    def is_stop(self):
+        return False
+
+    def feedback(self, params_list, reward_list):
+        self._round = self._round + 1
+
+        local_min_reward = min(reward_list)
+        local_min_reward_index = reward_list.index(local_min_reward)
+        local_hparams = self.evaluator.convert_params(
+            params_list[local_min_reward_index])
+        print("The local best eval value in the %s-th round is %s." %
+              (self._round - 1, REWARD_SUM - local_min_reward))
+        print("The local best hyperparameters are as:")
+        for index, hparam_name in enumerate(self.hparams_name_list):
+            print("%s=%s" % (hparam_name, local_hparams[index]))
+
+        for i in range(self.popsize):
+            if reward_list[i] < self.best_reward_per_pop[i]:
+                self.best_hparams_per_pop[i] = copy.deepcopy(
+                    self.current_hparams[i])
+                self.best_reward_per_pop[i] = reward_list[i]
+            if reward_list[i] < self.best_reward_all_pop:
+                self.best_hparams_all_pop = self.current_hparams[i]
+                self.best_reward_all_pop = reward_list[i]
+
+        best_hparams = self.evaluator.convert_params(self.best_hparams_all_pop)
+        for index, name in enumerate(self.hparams_name_list):
+            self.writer.add_scalar(
+                tag="hyperparameter tuning/" + name,
+                scalar_value=best_hparams[index],
+                global_step=self.round)
+        self.writer.add_scalar(
+            tag="hyperparameter tuning/best_eval_value",
+            scalar_value=self.get_best_eval_value(),
+            global_step=self.round)
+
+        self.estimate_momemtum()
+        for i in range(self.popsize):
+            for j in range(len(self.init_input)):
+                self.current_hparams[i][j] = self.current_hparams[i][j] + \
+                    self.alpha * self.momentums[i][j]
+        self.small_peturb()
```
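The new `HAZero` strategy delegates the whole search loop to the `cma` package through its ask/evaluate/tell interface, while `PSHE2` keeps a hand-rolled momentum update (each momentum entry decays by `1 - 3 * alpha / round` and is pulled toward both the per-population and the global best hyperparameters). For readers unfamiliar with the CMA-ES interface the commit adopts, here is a minimal, self-contained sketch of the same loop on a toy objective; `cma.ff.sphere` stands in for the finetune reward PaddleHub actually evaluates, and the dimension and option values are illustrative only.

```python
import cma  # same package the commit imports for HAZero

# Minimal ask/evaluate/tell loop mirroring how HAZero drives
# cma.CMAEvolutionStrategy; the sphere objective is a stand-in
# for the real finetune reward.
es = cma.CMAEvolutionStrategy([0.0, 0.0], 0.2,
                              {'popsize': 5, 'bounds': [-1, 1]})
while not es.stop():                       # HAZero.is_stop()
    solutions = es.ask()                   # HAZero.get_current_hparams()
    rewards = [cma.ff.sphere(x) for x in solutions]
    es.tell(solutions, rewards)            # HAZero.feedback(...)
    es.disp()
print(list(es.result.xbest))               # HAZero.get_best_hparams()
```

Bounding the search to [-1, 1] mirrors the option dict in the commit; the evaluator's `convert_params` evidently maps this normalized box back to real hyperparameter ranges.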
paddlehub/autofinetune/evaluator.py

```diff
@@ -40,6 +40,7 @@ class BaseEvaluator(object):
         with io.open(params_file, 'r', encoding='utf8') as f:
             self.params = yaml.safe_load(f)
         self.finetunee_script = finetunee_script
+        self.model_rewards = {}
 
     def get_init_params(self):
         init_params = []
@@ -134,7 +135,7 @@ class FullTrailEvaluator(BaseEvaluator):
             os.system(run_cmd)
             with open(log_file, "r") as f:
                 lines = f.readlines()
-                eval_result = lines[-1]
+                eval_result = float(lines[-1])
         except:
             print(
                 "WARNING: Program which was ran with hyperparameters as %s was crashed!"
@@ -148,7 +149,6 @@ class FullTrailEvaluator(BaseEvaluator):
 class ModelBasedEvaluator(BaseEvaluator):
     def __init__(self, params_file, finetunee_script):
         super(ModelBasedEvaluator, self).__init__(params_file, finetunee_script)
-        self.model_rewards = {}
         self.half_best_model_ckpt = []
         self.run_count = 0
@@ -187,7 +187,7 @@ class ModelBasedEvaluator(BaseEvaluator):
             os.system(run_cmd)
             with open(log_file, "r") as f:
                 lines = f.readlines()
-                eval_result = lines[-1]
+                eval_result = float(lines[-1])
         except:
             print(
                 "WARNING: Program which was ran with hyperparameters as %s was crashed!"
@@ -198,7 +198,7 @@ class ModelBasedEvaluator(BaseEvaluator):
         return reward
 
     def new_round(self):
-        """update self.half_best_model"""
+        """update half_best_model"""
         half_size = int(len(self.model_rewards) / 2)
         if half_size < 1:
             half_size = 1
```
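The `float(lines[-1])` change is more than cosmetic: the eval result is read back from a log file as a string, and strings compare lexicographically, which would silently corrupt the best-reward bookkeeping in the tuning strategies. A two-line illustration of the pitfall (values made up):

```python
# Rewards parsed from a log arrive as strings; "9.5" sorts after
# "10.0" lexicographically, so numeric conversion is required before
# the reward comparisons in feedback().
print("9.5" < "10.0")                  # False: "9" > "1" as characters
print(float("9.5") < float("10.0"))    # True: the intended ordering
```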
paddlehub/commands/autofinetune.py

```diff
@@ -31,6 +31,7 @@ import numpy as np
 from paddlehub.commands.base_command import BaseCommand, ENTRY
 from paddlehub.common.arg_helper import add_argument, print_arguments
 from paddlehub.autofinetune.autoft import PSHE2
+from paddlehub.autofinetune.autoft import HAZero
 from paddlehub.autofinetune.evaluator import FullTrailEvaluator
 from paddlehub.autofinetune.evaluator import ModelBasedEvaluator
 from paddlehub.common.logger import logger
@@ -71,6 +72,7 @@ class AutoFineTuneCommand(BaseCommand):
             "--cuda",
             type=ast.literal_eval,
             default=['0'],
+            required=True,
             help="The list of gpu devices to be used")
         self.arg_config_group.add_argument(
             "--round", type=int, default=10, help="Number of searches")
@@ -84,6 +86,11 @@ class AutoFineTuneCommand(BaseCommand):
             type=str,
             default="fulltrail",
             help="Choices: fulltrail or modelbased.")
+        self.arg_config_group.add_argument(
+            "--tuning_strategy",
+            type=str,
+            default="HAZero",
+            help="Choices: HAZero or PSHE2.")
 
     def execute(self, argv):
         if not argv:
@@ -121,11 +128,21 @@ class AutoFineTuneCommand(BaseCommand):
             raise ValueError(
                 "The evaluate %s is not defined!" % self.args.evaluate_choice)
 
-        autoft = PSHE2(
-            evaluator,
-            cudas=self.args.cuda,
-            popsize=self.args.popsize,
-            output_dir=self.args.output_dir)
+        if self.args.tuning_strategy.lower() == "hazero":
+            autoft = HAZero(
+                evaluator,
+                cudas=self.args.cuda,
+                popsize=self.args.popsize,
+                output_dir=self.args.output_dir)
+        elif self.args.tuning_strategy.lower() == "pshe2":
+            autoft = PSHE2(
+                evaluator,
+                cudas=self.args.cuda,
+                popsize=self.args.popsize,
+                output_dir=self.args.output_dir)
+        else:
+            raise ValueError(
+                "The tuning strategy %s is not defined!" % self.args.tuning_strategy)
 
         run_round_cnt = 0
         solutions_ckptdirs = {}
@@ -138,23 +155,21 @@ class AutoFineTuneCommand(BaseCommand):
             evaluator.new_round()
             run_round_cnt = run_round_cnt + 1
         print("PaddleHub Autofinetune ends.")
         with open("./log_file.txt", "w") as f:
-            best_choice = evaluator.convert_params(autoft.optimal_solution())
-            print("The best hyperparameters:")
-            f.write("The best hyperparameters:\n")
-            param_name = []
-            for idx, param in enumerate(evaluator.params["param_list"]):
-                param_name.append(param["name"])
-                f.write(param["name"] + "\t:\t" + str(best_choice[idx]) + "\n")
-                print("%s : %s" % (param["name"], best_choice[idx]))
+            best_hparams = evaluator.convert_params(autoft.get_best_hparams())
+            print("The final best hyperparameters:")
+            f.write("The final best hyperparameters:\n")
+            for index, hparam_name in enumerate(autoft.hparams_name_list):
+                print("%s=%s" % (hparam_name, best_hparams[index]))
+                f.write(hparam_name + "\t:\t" + str(best_hparams[index]) + "\n")
             f.write("\n\n\n")
-            f.write("\t".join(param_name) + "\toutput_dir\n\n")
+            f.write("\t".join(autoft.hparams_name_list) + "\toutput_dir\n\n")
             logger.info(
-                "The checkpont directory of programs ran with paramemters searched are saved as log_file.txt ."
+                "The checkpont directory of programs ran with hyperparamemters searched are saved as log_file.txt ."
            )
            print(
-                "The checkpont directory of programs ran with paramemters searched are saved as log_file.txt ."
+                "The checkpont directory of programs ran with hyperparamemters searched are saved as log_file.txt ."
            )
            for solution, ckptdir in solutions_ckptdirs.items():
                param = evaluator.convert_params(solution)
```
paddlehub/commands/config.py

```diff
@@ -67,25 +67,14 @@ class ConfigCommand(BaseCommand):
         print("Set success! The current configuration is shown below.")
         print(json.dumps(config, indent=4))
 
-    @staticmethod
-    def show_server_url():
-        with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-            config = json.load(fp)
-            print(config["server_url"])
-
-    @staticmethod
-    def show_log_level():
-        with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-            print(json.load(fp)["log_level"])
-
     @staticmethod
     def set_log_level(level):
         level = str(level).upper()
         if level not in [
-                "CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO",
-                "DEBUG", "NOTSET"
+                "NOLOG", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
         ]:
             print("Allowed values include: "
-                  "CRITICAL, FATAL, ERROR, WARN, WARNING, INFO, DEBUG, NOTSET")
+                  "NOLOG, DEBUG, INFO, WARNING, ERROR, CRITICAL")
             return
         with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
             current_config = json.load(fp)
@@ -97,18 +86,14 @@ class ConfigCommand(BaseCommand):
     @staticmethod
     def show_help():
-        str = "config <option> <value>\n"
+        str = "config <option>\n"
         str += "\tShow hub server config without any option.\n"
         str += "option:\n"
         str += "reset\n"
         str += "\tReset config as default.\n"
-        str += "server\n"
-        str += "\tShow server url.\n"
-        str += "server [URL]\n"
+        str += "server==[URL]\n"
         str += "\tSet hub server url as [URL].\n"
-        str += "log\n"
-        str += "\tShow log level.\n"
-        str += "log [LEVEL]\n"
+        str += "log==[LEVEL]\n"
         str += "\tSet log level as [LEVEL:NOLOG, DEBUG, INFO, WARNING, ERROR, CRITICAL].\n"
         print(str)
@@ -118,16 +103,10 @@ class ConfigCommand(BaseCommand):
             ConfigCommand.show_config()
         elif args.option == "reset":
             ConfigCommand.set_config(default_server_config)
-        elif args.option == "server":
-            if args.value is not None:
-                ConfigCommand.set_server_url(args.value)
-            else:
-                ConfigCommand.show_server_url()
-        elif args.option == "log":
-            if args.value is not None:
-                ConfigCommand.set_log_level(args.value)
-            else:
-                ConfigCommand.show_log_level()
+        elif args.option.startswith("server=="):
+            ConfigCommand.set_server_url(args.option.split("==")[1])
+        elif args.option.startswith("log=="):
+            ConfigCommand.set_log_level(args.option.split("==")[1])
         else:
             ConfigCommand.show_help()
         return True
```
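With the `show_*` helpers gone, the command now encodes the value directly in the option token, so `config server==[URL]` and `config log==[LEVEL]` replace the old two-argument forms. A minimal sketch of the parsing rule the new `execute` relies on (the option strings are made-up examples, not real config values):

```python
# Everything after "==" in the option token is the value; this mirrors
# the args.option.split("==")[1] dispatch added in this commit.
for option in ["server==http://paddlepaddle.org.cn/paddlehub", "log==DEBUG"]:
    key, value = option.split("==", 1)
    print(key, "->", value)   # server -> http://..., log -> DEBUG
```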
paddlehub/common/hub_server.py

```diff
@@ -24,7 +24,6 @@ import requests
 import json
 import yaml
 import random
-import fcntl
 from random import randint
 from paddlehub.common import utils, srv_utils
@@ -39,9 +38,6 @@ CACHE_TIME = 60 * 10
 class HubServer(object):
     def __init__(self, config_file_path=None):
-        LOCK_FILE = os.path.join(hub.HUB_HOME, '__LOCK__')
-        LOCK_FP = open(LOCK_FILE, 'a+')
-        fcntl.flock(LOCK_FP.fileno(), fcntl.LOCK_EX)
         if not config_file_path:
             config_file_path = os.path.join(hub.CONF_HOME, 'config.json')
         if not os.path.exists(hub.CONF_HOME):
@@ -57,7 +53,6 @@ class HubServer(object):
         self.server_url = self.config['server_url']
         self.request()
         self._load_resource_list_file_if_valid()
-        LOCK_FP.close()
 
     def get_server_url(self):
         random.seed(int(time.time()))
```
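For context on what was removed: the deleted lines serialized `HubServer` construction across processes with an exclusive advisory file lock. `fcntl` is POSIX-only, which is one plausible motivation for dropping it, though the commit itself does not say. A minimal sketch of the removed pattern, with an illustrative lock path:

```python
import fcntl  # POSIX-only module; unavailable on Windows

# The removed code held an exclusive advisory lock for the duration of
# __init__; the lock path here is illustrative.
with open("/tmp/__LOCK__", "a+") as lock_fp:
    fcntl.flock(lock_fp.fileno(), fcntl.LOCK_EX)  # blocks until exclusive
    print("critical section: config read + resource list refresh")
    fcntl.flock(lock_fp.fileno(), fcntl.LOCK_UN)  # release
```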
paddlehub/common/logger.py

```diff
@@ -39,12 +39,13 @@ class Logger(object):
         self.handler.setFormatter(self.format)
         self.logger.addHandler(self.handler)
-        if not os.path.exists(os.path.join(CONF_HOME, "config.json")):
-            self.logLevel = "DEBUG"
-        else:
-            with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-                self.logLevel = json.load(fp).get("log_level", "DEBUG")
-        self.logger.setLevel(self._get_logging_level())
+        self.logLevel = "DEBUG"
+        self.logger.setLevel(self._get_logging_level())
+        if os.path.exists(os.path.join(CONF_HOME, "config.json")):
+            with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
+                level = json.load(fp).get("log_level", "DEBUG")
+                self.logLevel = level
+                self.setLevel(level)
 
     def _is_no_log(self):
         return self.getLevel() == Logger.NOLOG
```
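The reordered constructor now sets the DEBUG default unconditionally and only then overrides it when `CONF_HOME/config.json` exists, so a missing config file no longer needs its own branch. A small sketch of the lookup it performs (path and contents are illustrative):

```python
import json
import os

# Mirrors the new lookup: default to DEBUG, then override from
# config.json if present; CONF_HOME stands in for PaddleHub's real
# config directory.
CONF_HOME = "/tmp/paddlehub_conf"
level = "DEBUG"
config_path = os.path.join(CONF_HOME, "config.json")
if os.path.exists(config_path):
    with open(config_path, "r") as fp:
        level = json.load(fp).get("log_level", "DEBUG")
print(level)
```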
paddlehub/finetune/strategy.py

```diff
@@ -477,7 +477,7 @@ class CombinedStrategy(DefaultStrategy):
         pass
 
     def __str__(self):
-        return "Strategy with sheduler: %s, regularization: %s and clip: %s" % (
+        return "Strategy with scheduler: %s, regularization: %s and clip: %s" % (
             self.scheduler, self.regularization, self.clip)
```
paddlehub/finetune/task/basic_task.py

```diff
@@ -458,14 +458,14 @@ class BasicTask(object):
     def _eval_end_event(self, run_states):
         eval_scores, eval_loss, run_speed = self._calculate_metrics(run_states)
         self.tb_writer.add_scalar(
-            tag=self.phase + "/Loss [{}]".format(self.phase),
+            tag="Loss_{}".format(self.phase),
             scalar_value=eval_loss,
             global_step=self.current_step)
         log_scores = ""
         for metric in eval_scores:
             self.tb_writer.add_scalar(
-                tag=self.phase + "/{} [{}]".format(metric, self.phase),
+                tag="{}_{}".format(metric, self.phase),
                 scalar_value=eval_scores[metric],
                 global_step=self.current_step)
@@ -498,13 +498,13 @@ class BasicTask(object):
     def _log_interval_event(self, run_states):
         scores, avg_loss, run_speed = self._calculate_metrics(run_states)
         self.tb_writer.add_scalar(
-            tag=self.phase + "/Loss [{}]".format(self.phase),
+            tag="Loss_{}".format(self.phase),
             scalar_value=avg_loss,
             global_step=self.current_step)
         log_scores = ""
         for metric in scores:
             self.tb_writer.add_scalar(
-                tag=self.phase + "/{} [{}]".format(metric, self.phase),
+                tag="{}_{}".format(metric, self.phase),
                 scalar_value=scores[metric],
                 global_step=self.current_step)
             log_scores += "%s=%.5f " % (metric, scores[metric])
```
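The tag change flattens the TensorBoard naming from a phase-prefixed, bracketed form to plain `Loss_<phase>` and `<metric>_<phase>` names. A quick check with illustrative values:

```python
# Before vs after this commit, for phase "train" and metric "acc"
# (values are illustrative).
phase, metric = "train", "acc"
print(phase + "/Loss [{}]".format(phase))        # old: train/Loss [train]
print("Loss_{}".format(phase))                   # new: Loss_train
print(phase + "/{} [{}]".format(metric, phase))  # old: train/acc [train]
print("{}_{}".format(metric, phase))             # new: acc_train
```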
tutorial/autofinetune.ipynb

```diff
@@ -192,7 +192,7 @@
 "\n",
 "> `--evaluate_choice`: 设置自动搜索超参的评价效果方式,可选fulltrail和modelbased, 默认为fulltrail\n",
 "\n",
-"> `--strategy`: 设置自动搜索超参策略,可选hazero和pshe2,默认为hazero\n"
+"> `--tuning_strategy`: 设置自动搜索超参策略,可选hazero和pshe2,默认为hazero\n"
 ]
 },
 {
```