PaddlePaddle / PaddleHub
Commit 4dc27439
Authored Sep 16, 2019 by zhangxuefei

    Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

Parents: 33c08f20, 6a386bb3

Showing 10 changed files with 276 additions and 113 deletions (+276 −113)
README.md                               +2    −2
paddlehub/autofinetune/autoft.py        +216  −43
paddlehub/autofinetune/evaluator.py     +4    −4
paddlehub/commands/autofinetune.py      +32   −17
paddlehub/commands/config.py            +10   −31
paddlehub/common/hub_server.py          +0    −5
paddlehub/common/logger.py              +6    −5
paddlehub/finetune/strategy.py          +1    −1
paddlehub/finetune/task/basic_task.py   +4    −4
tutorial/autofinetune.ipynb             +1    −1
README.md

@@ -60,8 +60,8 @@ $ hub run lac --input_text "今天是个好日子"

使用[情感分析](http://www.paddlepaddle.org.cn/hub?filter=category&value=SentimentAnalysis)模型Senta对句子进行情感预测

```shell
$ hub run senta_bilstm --input_text "今天是个好日子"
[{'text': '今天是个好日子', 'sentiment_label': 2, 'sentiment_key': 'positive', 'positive_probs': 0.6065, 'negative_probs': 0.3935}]
$ hub run senta_bilstm --input_text "今天天气真好"
[{'text': '今天天气真好', 'sentiment_label': 1, 'sentiment_key': 'positive', 'positive_probs': 0.9798, 'negative_probs': 0.0202}]
```

`示例三`
paddlehub/autofinetune/autoft.py

```diff
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from multiprocessing.pool import ThreadPool

+import cma
 import copy
 import json
 import math
@@ -20,8 +21,10 @@ import numpy as np
 import six
 import time

+from tb_paddle import SummaryWriter
 from paddlehub.common.logger import logger
 from paddlehub.common.utils import mkdir
+from paddlehub.autofinetune.evaluator import REWARD_SUM

 if six.PY3:
     INF = math.inf
```
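The new `REWARD_SUM` import pins down the reward convention used throughout this file: strategies minimize a reward, and the eval value is recovered by subtracting that reward from `REWARD_SUM` (see `get_best_eval_value` below). A one-line illustration; the constant's actual value lives in evaluator.py and is only assumed here:

```python
REWARD_SUM = 1  # assumption for illustration; really defined in paddlehub/autofinetune/evaluator.py
eval_value = 0.85                          # higher is better
reward = REWARD_SUM - eval_value           # lower is better; this is what the strategies minimize
assert REWARD_SUM - reward == eval_value   # the inversion used by get_best_eval_value()
```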
```diff
@@ -29,38 +32,30 @@ else:
     INF = float("inf")


-class PSHE2(object):
+class BaseTuningStrategy(object):
     def __init__(
             self,
             evaluator,
             cudas=["0"],
             popsize=5,
             output_dir=None,
-            alpha=0.5,
-            epsilon=0.2,
     ):
         self._num_thread = len(cudas)
         self._popsize = popsize
-        self._alpha = alpha
-        self._epsilon = epsilon
-        self._iteration = 0
         self.cudas = cudas
         self.is_cuda_free = {"free": [], "busy": []}
         self.is_cuda_free["free"] = cudas
+        self._round = 0
         self.evaluator = evaluator
         self.init_input = evaluator.get_init_params()
-        self.num_hparm = len(self.init_input)
-        self.best_hparams_per_pop = [[0] * self.num_hparm] * self._popsize
-        self.best_reward_per_pop = [INF] * self._popsize
-        self.momentums = [[0] * self.num_hparm] * self._popsize
-        self.best_hparms_all_pop = []
+        self.num_hparam = len(self.init_input)
+        self.best_hparams_all_pop = []
         self.best_reward_all_pop = INF
-        self.current_hparams = [[0] * self.num_hparm] * self._popsize
-        for i in range(self.popsize):
-            self.current_hparams[i] = self.randomSolution()
+        self.current_hparams = [[0] * self.num_hparam] * self._popsize
+        self.hparams_name_list = [
+            param["name"] for param in evaluator.params['param_list']
+        ]

         if output_dir is None:
             now = int(time.time())
@@ -68,6 +63,7 @@ class PSHE2(object):
             self._output_dir = "output_" + time_str
         else:
             self._output_dir = output_dir
+        self.writer = SummaryWriter(logdir=self._output_dir + '/tb_paddle')

     @property
     def thread(self):
@@ -77,14 +73,6 @@ class PSHE2(object):
     def popsize(self):
         return self._popsize

-    @property
-    def alpha(self):
-        return self._alpha
-
-    @property
-    def epsilon(self):
-        return self._epsilon
-
     @property
     def output_dir(self):
         return self._output_dir
@@ -93,6 +81,10 @@ class PSHE2(object):
     def iteration(self):
         return self._iteration

+    @property
+    def round(self):
+        return self._round
+
     def set_output_dir(self, output_dir=None):
         if output_dir is not None:
             output_dir = output_dir
@@ -154,31 +146,20 @@ class PSHE2(object):
     def is_stop(self):
         return False

-    def solutions(self):
+    def get_current_hparams(self):
         return self.current_hparams

     def feedback(self, params_list, reward_list):
-        self._iteration = self._iteration + 1
-        for i in range(self.popsize):
-            if reward_list[i] < self.best_reward_per_pop[i]:
-                self.best_hparams_per_pop[i] = copy.deepcopy(
-                    self.current_hparams[i])
-                self.best_reward_per_pop[i] = reward_list[i]
-            if reward_list[i] < self.best_reward_all_pop:
-                self.best_hparms_all_pop = self.current_hparams[i]
-                self.best_reward_all_pop = reward_list[i]
-        self.estimateMomemtum()
-        for i in range(self.popsize):
-            for j in range(len(self.init_input)):
-                self.current_hparams[i][j] = self.current_hparams[i][
-                    j] + self.alpha * self.momentums[i][j]
-        self.smallPeturb()
+        return NotImplementedError

-    def optimal_solution(self):
-        return self.best_hparms_all_pop
+    def get_best_hparams(self):
+        return self.best_hparams_all_pop
+
+    def get_best_eval_value(self):
+        return REWARD_SUM - self.best_reward_all_pop

     def step(self, output_dir):
-        solutions = self.solutions()
+        solutions = self.get_current_hparams()
         params_cudas_dirs = []
         solution_results = []
@@ -209,3 +190,195 @@ class PSHE2(object):
         self.feedback(solutions, solution_results)

         return solutions_ckptdirs
+
+
+class HAZero(BaseTuningStrategy):
+    def __init__(
+            self,
+            evaluator,
+            cudas=["0"],
+            popsize=1,
+            output_dir=None,
+            sigma=0.2,
+    ):
+        super(HAZero, self).__init__(evaluator, cudas, popsize, output_dir)
+
+        self._sigma = sigma
+
+        self.evolution_stratefy = cma.CMAEvolutionStrategy(
+            self.init_input, sigma, {
+                'popsize': self.popsize,
+                'bounds': [-1, 1],
+                'AdaptSigma': True,
+                'verb_disp': 1,
+                'verb_time': 'True',
+            })
+
+    @property
+    def sigma(self):
+        return self._sigma
+
+    def get_current_hparams(self):
+        return self.evolution_stratefy.ask()
+
+    def is_stop(self):
+        return self.evolution_stratefy.stop()
+
+    def feedback(self, params_list, reward_list):
+        self._round = self._round + 1
+
+        local_min_reward = min(reward_list)
+        local_min_reward_index = reward_list.index(local_min_reward)
+        local_hparams = self.evaluator.convert_params(
+            params_list[local_min_reward_index])
+        print("The local best eval value in the %s-th round is %s." %
+              (self._round - 1, REWARD_SUM - local_min_reward))
+        print("The local best hyperparameters are as:")
+        for index, hparam_name in enumerate(self.hparams_name_list):
+            print("%s=%s" % (hparam_name, local_hparams[index]))
+
+        for i in range(self.popsize):
+            if reward_list[i] < self.best_reward_all_pop:
+                self.best_hparams_all_pop = self.current_hparams[i]
+                self.best_reward_all_pop = reward_list[i]
+
+        best_hparams = self.evaluator.convert_params(
+            self.best_hparams_all_pop)
+        for index, name in enumerate(self.hparams_name_list):
+            self.writer.add_scalar(
+                tag="hyperparameter tuning/" + name,
+                scalar_value=best_hparams[index],
+                global_step=self.round)
+        self.writer.add_scalar(
+            tag="hyperparameter tuning/best_eval_value",
+            scalar_value=self.get_best_eval_value(),
+            global_step=self.round)
+
+        self.evolution_stratefy.tell(params_list, reward_list)
+        self.evolution_stratefy.disp()
+
+    def get_best_hparams(self):
+        return list(self.evolution_stratefy.result.xbest)
+
+
+class PSHE2(BaseTuningStrategy):
+    def __init__(
+            self,
+            evaluator,
+            cudas=["0"],
+            popsize=1,
+            output_dir=None,
+            alpha=0.5,
+            epsilon=0.2,
+    ):
+        super(PSHE2, self).__init__(evaluator, cudas, popsize, output_dir)
+
+        self._alpha = alpha
+        self._epsilon = epsilon
+
+        self.best_hparams_per_pop = [[0] * self.num_hparam] * self._popsize
+        self.best_reward_per_pop = [INF] * self._popsize
+        self.momentums = [[0] * self.num_hparam] * self._popsize
+
+        for i in range(self.popsize):
+            self.current_hparams[i] = self.set_random_hparam()
+
+    @property
+    def alpha(self):
+        return self._alpha
+
+    @property
+    def epsilon(self):
+        return self._epsilon
+
+    def set_random_hparam(self):
+        solut = [0] * self.num_hparam
+        for i in range(self.num_hparam):
+            ratio = (np.random.random_sample() - 0.5) * 2.0
+            if ratio >= 0:
+                solut[i] = (
+                    1.0 - self.init_input[i]) * ratio + self.init_input[i]
+            else:
+                solut[i] = (
+                    self.init_input[i] + 1.0) * ratio + self.init_input[i]
+        return solut
+
+    def small_peturb(self):
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                ratio = (np.random.random_sample() - 0.5) * 2.0
+                if ratio >= 0:
+                    self.current_hparams[i][j] = (
+                        1.0 - self.current_hparams[i][j]
+                    ) * ratio * self.epsilon + self.current_hparams[i][j]
+                else:
+                    self.current_hparams[i][j] = (
+                        self.current_hparams[i][j] + 1.0
+                    ) * ratio * self.epsilon + self.current_hparams[i][j]
+
+    def estimate_popgradients(self):
+        gradients = [[0] * self.num_hparam] * self.popsize
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                gradients[i][j] = self.current_hparams[i][
+                    j] - self.best_hparams_all_pop[j]
+        return gradients
+
+    def estimate_local_gradients(self):
+        gradients = [[0] * self.num_hparam] * self.popsize
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                gradients[i][j] = self.current_hparams[i][
+                    j] - self.best_hparams_per_pop[i][j]
+        return gradients
+
+    def estimate_momemtum(self):
+        popGrads = self.estimate_popgradients()
+        localGrads = self.estimate_local_gradients()
+        for i in range(self.popsize):
+            for j in range(self.num_hparam):
+                self.momentums[i][j] = (
+                    1 - 3.0 * self.alpha / self.round) * self.momentums[i][
+                        j] - self.alpha * localGrads[i][
+                            j] - self.alpha * popGrads[i][j]
+
+    def is_stop(self):
+        return False
+
+    def feedback(self, params_list, reward_list):
+        self._round = self._round + 1
+
+        local_min_reward = min(reward_list)
+        local_min_reward_index = reward_list.index(local_min_reward)
+        local_hparams = self.evaluator.convert_params(
+            params_list[local_min_reward_index])
+        print("The local best eval value in the %s-th round is %s." %
+              (self._round - 1, REWARD_SUM - local_min_reward))
+        print("The local best hyperparameters are as:")
+        for index, hparam_name in enumerate(self.hparams_name_list):
+            print("%s=%s" % (hparam_name, local_hparams[index]))
+
+        for i in range(self.popsize):
+            if reward_list[i] < self.best_reward_per_pop[i]:
+                self.best_hparams_per_pop[i] = copy.deepcopy(
+                    self.current_hparams[i])
+                self.best_reward_per_pop[i] = reward_list[i]
+            if reward_list[i] < self.best_reward_all_pop:
+                self.best_hparams_all_pop = self.current_hparams[i]
+                self.best_reward_all_pop = reward_list[i]
+
+        best_hparams = self.evaluator.convert_params(
+            self.best_hparams_all_pop)
+        for index, name in enumerate(self.hparams_name_list):
+            self.writer.add_scalar(
+                tag="hyperparameter tuning/" + name,
+                scalar_value=best_hparams[index],
+                global_step=self.round)
+        self.writer.add_scalar(
+            tag="hyperparameter tuning/best_eval_value",
+            scalar_value=self.get_best_eval_value(),
+            global_step=self.round)
+
+        self.estimate_momemtum()
+        for i in range(self.popsize):
+            for j in range(len(self.init_input)):
+                self.current_hparams[i][j] = self.current_hparams[i][
+                    j] + self.alpha * self.momentums[i][j]
+        self.small_peturb()
```
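After this refactor, both strategies sit behind the same ask/evaluate/tell contract: `get_current_hparams()` proposes one candidate per population member, `step()` evaluates them, and `feedback()` folds the rewards back in (HAZero hands them to CMA-ES via `tell()`, PSHE2 updates its momentums and perturbs). A minimal sketch of that loop, with the per-candidate evaluation call assumed:

```python
# A sketch of the search loop the strategies are built for; `evaluator.run`
# stands in for the real evaluation, which launches the finetunee script
# once per candidate and returns a reward to minimize.
def search(autoft, evaluator, max_round=10):
    for _ in range(max_round):
        if autoft.is_stop():
            break
        solutions = autoft.get_current_hparams()          # ask
        rewards = [evaluator.run(s) for s in solutions]   # assumed eval call
        autoft.feedback(solutions, rewards)               # tell
    return autoft.get_best_hparams()
```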
paddlehub/autofinetune/evaluator.py

```diff
@@ -40,6 +40,7 @@ class BaseEvaluator(object):
         with io.open(params_file, 'r', encoding='utf8') as f:
             self.params = yaml.safe_load(f)
         self.finetunee_script = finetunee_script
+        self.model_rewards = {}

     def get_init_params(self):
         init_params = []
@@ -134,7 +135,7 @@ class FullTrailEvaluator(BaseEvaluator):
             os.system(run_cmd)
             with open(log_file, "r") as f:
                 lines = f.readlines()
-                eval_result = lines[-1]
+                eval_result = float(lines[-1])
         except:
             print(
                 "WARNING: Program which was ran with hyperparameters as %s was crashed!"
@@ -148,7 +149,6 @@ class FullTrailEvaluator(BaseEvaluator):
 class ModelBasedEvaluator(BaseEvaluator):
     def __init__(self, params_file, finetunee_script):
         super(ModelBasedEvaluator, self).__init__(params_file, finetunee_script)
-        self.model_rewards = {}
         self.half_best_model_ckpt = []
         self.run_count = 0
@@ -187,7 +187,7 @@ class ModelBasedEvaluator(BaseEvaluator):
             os.system(run_cmd)
             with open(log_file, "r") as f:
                 lines = f.readlines()
-                eval_result = lines[-1]
+                eval_result = float(lines[-1])
         except:
             print(
                 "WARNING: Program which was ran with hyperparameters as %s was crashed!"
@@ -198,7 +198,7 @@ class ModelBasedEvaluator(BaseEvaluator):
         return reward

     def new_round(self):
-        """update self.half_best_model"""
+        """update half_best_model"""
         half_size = int(len(self.model_rewards) / 2)
         if half_size < 1:
             half_size = 1
```
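Both evaluators recover the reward by reading the finetunee script's log and, after this fix, casting the last line to `float`. Any finetunee script therefore has to end by printing the evaluation value alone on its final line; a hypothetical minimal script honoring that protocol:

```python
# finetunee.py (hypothetical) — the contract the evaluators rely on:
# print the eval metric as the very last line of output, since the
# reward is parsed with float(lines[-1]).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", type=float, default=5e-5)  # an example searched hparam
args = parser.parse_args()

eval_acc = 0.85  # placeholder for the real finetune + evaluate result
print(eval_acc)  # must be the final line of output
```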
paddlehub/commands/autofinetune.py

```diff
@@ -31,6 +31,7 @@ import numpy as np
 from paddlehub.commands.base_command import BaseCommand, ENTRY
 from paddlehub.common.arg_helper import add_argument, print_arguments
 from paddlehub.autofinetune.autoft import PSHE2
+from paddlehub.autofinetune.autoft import HAZero
 from paddlehub.autofinetune.evaluator import FullTrailEvaluator
 from paddlehub.autofinetune.evaluator import ModelBasedEvaluator
 from paddlehub.common.logger import logger
@@ -71,6 +72,7 @@ class AutoFineTuneCommand(BaseCommand):
             "--cuda",
             type=ast.literal_eval,
             default=['0'],
+            required=True,
             help="The list of gpu devices to be used")
         self.arg_config_group.add_argument(
             "--round", type=int, default=10, help="Number of searches")
@@ -84,6 +86,11 @@ class AutoFineTuneCommand(BaseCommand):
             type=str,
             default="fulltrail",
             help="Choices: fulltrail or modelbased.")
+        self.arg_config_group.add_argument(
+            "--tuning_strategy",
+            type=str,
+            default="HAZero",
+            help="Choices: HAZero or PSHE2.")

     def execute(self, argv):
         if not argv:
@@ -121,11 +128,21 @@ class AutoFineTuneCommand(BaseCommand):
             raise ValueError(
                 "The evaluate %s is not defined!" % self.args.evaluate_choice)

-        autoft = PSHE2(
-            evaluator,
-            cudas=self.args.cuda,
-            popsize=self.args.popsize,
-            output_dir=self.args.output_dir)
+        if self.args.tuning_strategy.lower() == "hazero":
+            autoft = HAZero(
+                evaluator,
+                cudas=self.args.cuda,
+                popsize=self.args.popsize,
+                output_dir=self.args.output_dir)
+        elif self.args.tuning_strategy.lower() == "pshe2":
+            autoft = PSHE2(
+                evaluator,
+                cudas=self.args.cuda,
+                popsize=self.args.popsize,
+                output_dir=self.args.output_dir)
+        else:
+            raise ValueError("The tuning strategy %s is not defined!" %
+                             self.args.tuning_strategy)

         run_round_cnt = 0
         solutions_ckptdirs = {}
@@ -138,23 +155,21 @@ class AutoFineTuneCommand(BaseCommand):
             evaluator.new_round()
             run_round_cnt = run_round_cnt + 1
         print("PaddleHub Autofinetune ends.")
         with open("./log_file.txt", "w") as f:
-            best_choice = evaluator.convert_params(autoft.optimal_solution())
-            print("The best hyperparameters:")
-            f.write("The best hyperparameters:\n")
-            param_name = []
-            for idx, param in enumerate(evaluator.params["param_list"]):
-                param_name.append(param["name"])
-                f.write(param["name"] + "\t:\t" + str(best_choice[idx]) + "\n")
-                print("%s : %s" % (param["name"], best_choice[idx]))
+            best_hparams = evaluator.convert_params(autoft.get_best_hparams())
+            print("The final best hyperparameters:")
+            f.write("The final best hyperparameters:\n")
+            for index, hparam_name in enumerate(autoft.hparams_name_list):
+                print("%s=%s" % (hparam_name, best_hparams[index]))
+                f.write(hparam_name + "\t:\t" + str(best_hparams[index]) +
+                        "\n")
             f.write("\n\n\n")
-            f.write("\t".join(param_name) + "\toutput_dir\n\n")
+            f.write("\t".join(autoft.hparams_name_list) + "\toutput_dir\n\n")
             logger.info(
-                "The checkpont directory of programs ran with paramemters searched are saved as log_file.txt ."
+                "The checkpont directory of programs ran with hyperparamemters searched are saved as log_file.txt ."
             )
             print(
-                "The checkpont directory of programs ran with paramemters searched are saved as log_file.txt ."
+                "The checkpont directory of programs ran with hyperparamemters searched are saved as log_file.txt ."
             )
             for solution, ckptdir in solutions_ckptdirs.items():
                 param = evaluator.convert_params(solution)
```
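Note that `--cuda` is declared with `type=ast.literal_eval`, so its value has to be a Python list literal; the tuning strategy then sizes its worker pool from `len(cudas)`. A quick illustration of how such a flag value parses:

```python
import ast

# The shell argument --cuda "['0','1']" arrives as the string "['0','1']";
# ast.literal_eval turns it into a real list, and the strategy uses
# len(cudas) as its thread count (one worker per card).
cudas = ast.literal_eval("['0','1']")
print(cudas)        # ['0', '1']
print(len(cudas))   # 2
```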
paddlehub/commands/config.py

```diff
@@ -67,25 +67,14 @@ class ConfigCommand(BaseCommand):
         print("Set success! The current configuration is shown below.")
         print(json.dumps(config, indent=4))

-    @staticmethod
-    def show_server_url():
-        with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-            config = json.load(fp)
-            print(config["server_url"])
-
-    @staticmethod
-    def show_log_level():
-        with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-            print(json.load(fp)["log_level"])
-
     @staticmethod
     def set_log_level(level):
         level = str(level).upper()
         if level not in [
-                "CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO",
-                "DEBUG", "NOTSET"
+                "NOLOG", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
         ]:
             print("Allowed values include: "
-                  "CRITICAL, FATAL, ERROR, WARN, WARNING, INFO, DEBUG, NOTSET")
+                  "NOLOG, DEBUG, INFO, WARNING, ERROR, CRITICAL")
             return
         with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
             current_config = json.load(fp)
@@ -97,18 +86,14 @@ class ConfigCommand(BaseCommand):
     @staticmethod
     def show_help():
-        str = "config <option> <value>\n"
+        str = "config <option>\n"
         str += "\tShow hub server config without any option.\n"
         str += "option:\n"
         str += "reset\n"
         str += "\tReset config as default.\n"
-        str += "server\n"
-        str += "\tShow server url.\n"
-        str += "server [URL]\n"
+        str += "server==[URL]\n"
         str += "\tSet hub server url as [URL].\n"
-        str += "log\n"
-        str += "\tShow log level.\n"
-        str += "log [LEVEL]\n"
+        str += "log==[LEVEL]\n"
         str += "\tSet log level as [LEVEL:NOLOG, DEBUG, INFO, WARNING, ERROR, CRITICAL].\n"
         print(str)
@@ -118,16 +103,10 @@ class ConfigCommand(BaseCommand):
             ConfigCommand.show_config()
         elif args.option == "reset":
             ConfigCommand.set_config(default_server_config)
-        elif args.option == "server":
-            if args.value is not None:
-                ConfigCommand.set_server_url(args.value)
-            else:
-                ConfigCommand.show_server_url()
-        elif args.option == "log":
-            if args.value is not None:
-                ConfigCommand.set_log_level(args.value)
-            else:
-                ConfigCommand.show_log_level()
+        elif args.option.startswith("server=="):
+            ConfigCommand.set_server_url(args.option.split("==")[1])
+        elif args.option.startswith("log=="):
+            ConfigCommand.set_log_level(args.option.split("==")[1])
         else:
             ConfigCommand.show_help()
         return True
```
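With the interactive `server`/`log` forms gone, the value now travels inside the option itself and is split off the `==`. A small sketch of the new parsing (the URL is purely illustrative):

```python
# hub config server==<URL> / log==<LEVEL>: the value is embedded in the option.
option = "server==http://hub.example.org"    # hypothetical URL
if option.startswith("server=="):
    server_url = option.split("==")[1]       # -> "http://hub.example.org"
elif option.startswith("log=="):
    log_level = option.split("==")[1]        # e.g. "DEBUG"
```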
paddlehub/common/hub_server.py

```diff
@@ -24,7 +24,6 @@ import requests
 import json
 import yaml
 import random
-import fcntl
 from random import randint
 from paddlehub.common import utils, srv_utils
@@ -39,9 +38,6 @@ CACHE_TIME = 60 * 10
 class HubServer(object):
     def __init__(self, config_file_path=None):
-        LOCK_FILE = os.path.join(hub.HUB_HOME, '__LOCK__')
-        LOCK_FP = open(LOCK_FILE, 'a+')
-        fcntl.flock(LOCK_FP.fileno(), fcntl.LOCK_EX)
         if not config_file_path:
             config_file_path = os.path.join(hub.CONF_HOME, 'config.json')
         if not os.path.exists(hub.CONF_HOME):
@@ -57,7 +53,6 @@ class HubServer(object):
         self.server_url = self.config['server_url']
         self.request()
         self._load_resource_list_file_if_valid()
-        LOCK_FP.close()

     def get_server_url(self):
         random.seed(int(time.time()))
```
paddlehub/common/logger.py

```diff
@@ -39,12 +39,13 @@ class Logger(object):
         self.handler.setFormatter(self.format)
         self.logger.addHandler(self.handler)
-        if not os.path.exists(os.path.join(CONF_HOME, "config.json")):
-            self.logLevel = "DEBUG"
-        else:
-            with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
-                self.logLevel = json.load(fp).get("log_level", "DEBUG")
+        self.logLevel = "DEBUG"
         self.logger.setLevel(self._get_logging_level())
+        if os.path.exists(os.path.join(CONF_HOME, "config.json")):
+            with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
+                level = json.load(fp).get("log_level", "DEBUG")
+                self.logLevel = level
+                self.setLevel(level)

     def _is_no_log(self):
         return self.getLevel() == Logger.NOLOG
```
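The constructor now seeds `logLevel` with `"DEBUG"`, applies it, and only afterwards overrides both the attribute and the handler level from `config.json` when that file exists. A standalone sketch of the read-with-fallback (the CONF_HOME path here is an assumption):

```python
import json
import os

CONF_HOME = os.path.expanduser("~/.paddlehub/conf")  # assumed location
log_level = "DEBUG"                                  # safe default first
config_path = os.path.join(CONF_HOME, "config.json")
if os.path.exists(config_path):                      # override only if present
    with open(config_path, "r") as fp:
        log_level = json.load(fp).get("log_level", "DEBUG")
print(log_level)
```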
paddlehub/finetune/strategy.py

```diff
@@ -477,7 +477,7 @@ class CombinedStrategy(DefaultStrategy):
         pass

     def __str__(self):
-        return "Strategy with sheduler: %s, regularization: %s and clip: %s" % (
+        return "Strategy with scheduler: %s, regularization: %s and clip: %s" % (
             self.scheduler, self.regularization, self.clip)
```
paddlehub/finetune/task/basic_task.py

```diff
@@ -458,14 +458,14 @@ class BasicTask(object):
     def _eval_end_event(self, run_states):
         eval_scores, eval_loss, run_speed = self._calculate_metrics(run_states)
         self.tb_writer.add_scalar(
-            tag=self.phase + "/Loss [{}]".format(self.phase),
+            tag="Loss_{}".format(self.phase),
             scalar_value=eval_loss,
             global_step=self.current_step)
         log_scores = ""
         for metric in eval_scores:
             self.tb_writer.add_scalar(
-                tag=self.phase + "/{} [{}]".format(metric, self.phase),
+                tag="{}_{}".format(metric, self.phase),
                 scalar_value=eval_scores[metric],
                 global_step=self.current_step)
@@ -498,13 +498,13 @@ class BasicTask(object):
     def _log_interval_event(self, run_states):
         scores, avg_loss, run_speed = self._calculate_metrics(run_states)
         self.tb_writer.add_scalar(
-            tag=self.phase + "/Loss [{}]".format(self.phase),
+            tag="Loss_{}".format(self.phase),
             scalar_value=avg_loss,
             global_step=self.current_step)
         log_scores = ""
         for metric in scores:
             self.tb_writer.add_scalar(
-                tag=self.phase + "/{} [{}]".format(metric, self.phase),
+                tag="{}_{}".format(metric, self.phase),
                 scalar_value=scores[metric],
                 global_step=self.current_step)
             log_scores += "%s=%.5f " % (metric, scores[metric])
```
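The only change here is the tag scheme for the tb_paddle scalars: the phase-prefixed, bracketed tags are flattened to plain `Name_phase` identifiers. A before/after illustration:

```python
phase = "train"
metric = "acc"

old_loss_tag = phase + "/Loss [{}]".format(phase)          # "train/Loss [train]"
new_loss_tag = "Loss_{}".format(phase)                     # "Loss_train"

old_metric_tag = phase + "/{} [{}]".format(metric, phase)  # "train/acc [train]"
new_metric_tag = "{}_{}".format(metric, phase)             # "acc_train"
print(new_loss_tag, new_metric_tag)
```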
tutorial/autofinetune.ipynb

```diff
@@ -192,7 +192,7 @@
     "\n",
     "> `--evaluate_choice`: 设置自动搜索超参的评价效果方式,可选fulltrail和modelbased, 默认为fulltrail\n",
     "\n",
-    "> `--strategy`: 设置自动搜索超参策略,可选hazero和pshe2,默认为hazero\n"
+    "> `--tuning_strategy`: 设置自动搜索超参策略,可选hazero和pshe2,默认为hazero\n"
    ]
   },
   {
```