PaddlePaddle / PaddleFL
Commit babe466a
Authored by Qinghe JING on Jul 31, 2020; committed via GitHub on Jul 31, 2020.
Merge pull request #88 from hysunflower/update_scripts
Update scripts
Parents: 7a30fad3 7755e78d
Showing 13 changed files with 100 additions and 68 deletions (+100, -68):
python/paddle_fl/paddle_fl/examples/ctr_demo/fl_trainer.py  +1 -1
python/paddle_fl/paddle_fl/examples/ctr_demo/run.sh  +2 -1
python/paddle_fl/paddle_fl/examples/dpsgd_demo/fl_trainer.py  +3 -2
python/paddle_fl/paddle_fl/examples/dpsgd_demo/run.sh  +15 -11
python/paddle_fl/paddle_fl/examples/femnist_demo/fl_trainer.py  +3 -3
python/paddle_fl/paddle_fl/examples/femnist_demo/run.sh  +11 -6
python/paddle_fl/paddle_fl/examples/generate_job_from_program/README.md  +4 -0
python/paddle_fl/paddle_fl/examples/generate_job_from_program/fl_trainer.py  +3 -2
python/paddle_fl/paddle_fl/examples/generate_job_from_program/run.sh  +17 -9
python/paddle_fl/paddle_fl/examples/gru4rec_demo/fl_trainer.py  +7 -6
python/paddle_fl/paddle_fl/examples/gru4rec_demo/run.sh  +19 -10
python/paddle_fl/paddle_fl/examples/secagg_demo/fl_trainer.py  +2 -7
python/paddle_fl/paddle_fl/examples/secagg_demo/run.sh  +13 -10
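The run.sh changes below all apply the same pattern: kill any leftover fl_* processes, take an optional log directory as the first argument (default "logs"), redirect each component's stdout and stderr into that directory, and start the trainers in a loop instead of one hard-coded line per trainer. The sketch below condenses that pattern; the trainer count and the extra per-demo setup steps differ between examples, so treat it as an illustration rather than any one of the scripts.

```sh
#!/bin/bash
# Stop any federated-learning processes left over from a previous run.
ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9

# The first positional argument selects the log directory; default to "logs".
log_dir=${1:-"logs"}
mkdir -p ${log_dir}

# Start master, scheduler and server, each with its own log file.
python fl_master.py > ${log_dir}/master.log 2>&1 &
sleep 2
python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
sleep 5
python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
sleep 2

# Launch the trainers in a loop; the count is 4 here but varies per demo.
for ((i=0;i<4;i++))
do
    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
    sleep 2
done
```

With this layout, `bash run.sh` writes everything under ./logs, while `bash run.sh /tmp/fl_run1` keeps the output of a particular run separate.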
python/paddle_fl/paddle_fl/examples/ctr_demo/fl_trainer.py

@@ -50,7 +50,7 @@ epoch_id = 0
 while not trainer.stop():
     if epoch_id > 15:
         break
-    print("{} epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_id))
+    print("{} Epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_id))
     train_step = 0
     for data in reader():
         trainer.run(feed=data, fetch=[])
python/paddle_fl/paddle_fl/examples/ctr_demo/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
 ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
-log_dir=${1:-$(pwd)}
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
 python fl_master.py > ${log_dir}/master.log 2>&1 &
 ...
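The behavioral change here is the log_dir default: `${1:-$(pwd)}` fell back to the current working directory, while `${1:-"logs"}` falls back to a logs/ subdirectory that the new `mkdir -p` line creates. `${1:-word}` is ordinary bash default-value expansion (use `$1` if it is set and non-empty, otherwise `word`); a minimal illustration:

```sh
# Hypothetical two-liner showing ${1:-word}: $1 wins when given, "logs" otherwise.
log_dir=${1:-"logs"}
echo "logs will be written to: ${log_dir}"
```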
python/paddle_fl/paddle_fl/examples/dpsgd_demo/fl_trainer.py

@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import logging
 import math
+import time
 logging.basicConfig(filename="test.log",

@@ -72,9 +73,9 @@ epoch_id = 0
 step = 0
 while not trainer.stop():
     epoch_id += 1
-    if epoch_id > 40:
+    if epoch_id > 10:
         break
-    print("epoch %d start train" % (epoch_id))
+    print("{} Epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_id))
     for step_id, data in enumerate(train_reader()):
         acc = trainer.run(feeder.feed(data), fetch=["accuracy_0.tmp_0"])
         step += 1
python/paddle_fl/paddle_fl/examples/dpsgd_demo/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
-python fl_master.py
+ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
+python fl_master.py > ${log_dir}/master.log 2>&1 &
 sleep 2
-python -u fl_scheduler.py > scheduler.log &
+python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
+sleep 5
+python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
 sleep 2
-python -u fl_server.py > server0.log &
-sleep 2
-python -u fl_trainer.py 0 > trainer0.log &
-sleep 2
-python -u fl_trainer.py 1 > trainer1.log &
-sleep 2
-python -u fl_trainer.py 2 > trainer2.log &
-sleep 2
-python -u fl_trainer.py 3 > trainer3.log &
+for ((i=0;i<4;i++))
+do
+    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
+    sleep 2
+done
python/paddle_fl/paddle_fl/examples/femnist_demo/fl_trainer.py

@@ -21,6 +21,7 @@ import paddle
 import paddle.fluid as fluid
 import logging
 import math
+import time
 logging.basicConfig(filename="test.log",

@@ -60,7 +61,7 @@ def train_test(train_test_program, train_test_feed, train_test_reader):
 epoch_id = 0
 step = 0
-epoch = 3000
+epoch = 10
 count_by_step = False
 if count_by_step:
     output_folder = "model_node%d" % trainer_id

@@ -72,7 +73,7 @@ while not trainer.stop():
     epoch_id += 1
     if epoch_id > epoch:
         break
-    print("epoch %d start train" % (epoch_id))
+    print("{} Epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_id))
     #train_data,test_data= data_generater(trainer_id,inner_step=trainer._step,batch_size=64,count_by_step=count_by_step)
     train_reader = paddle.batch(
         paddle.reader.shuffle(

@@ -97,7 +98,6 @@ while not trainer.stop():
         acc = trainer.run(feeder.feed(data), fetch=["accuracy_0.tmp_0"])
         step += 1
         count += 1
-        print(count)
         if count % trainer._step == 0:
             break
 # print("acc:%.3f" % (acc[0]))
python/paddle_fl/paddle_fl/examples/femnist_demo/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
-#killall python
-python fl_master.py
+ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
+python fl_master.py > ${log_dir}/master.log 2>&1 &
 sleep 2
-python -u fl_scheduler.py > scheduler.log &
-sleep 2
-python -u fl_server.py > server0.log &
+python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
+sleep 5
+python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
 sleep 2
 for ((i=0;i<4;i++))
 do
-    python -u fl_trainer.py $i > trainer$i.log &
+    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
     sleep 2
 done
python/paddle_fl/paddle_fl/examples/generate_job_from_program/README.md

@@ -17,6 +17,10 @@ pip install paddle_fl
 #### How to save a program
+```sh
+python program_saver.py
+```
+In program_saver.py, you can defind a model. And save the program in to 'load_file'
 ```python
python/paddle_fl/paddle_fl/examples/generate_job_from_program/fl_trainer.py

@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import logging
 import math
+import time
 logging.basicConfig(filename="test.log",

@@ -67,9 +68,9 @@ epoch_id = 0
 step = 0
 while not trainer.stop():
     epoch_id += 1
-    if epoch_id > 40:
+    if epoch_id > 10:
         break
-    print("epoch %d start train" % (epoch_id))
+    print("{} Epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_id))
     for step_id, data in enumerate(train_reader()):
         acc = trainer.run(feeder.feed(data), fetch=["accuracy_0.tmp_0"])
         step += 1
python/paddle_fl/paddle_fl/examples/generate_job_from_program/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
-python program_saver.py
+ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
+if [ ! -d load_file ]; then
+    python program_saver.py
+fi
-python fl_master.py
-sleep 2
-python -u fl_scheduler.py > scheduler.log &
-sleep 2
-python -u fl_server.py > server0.log &
-sleep 2
-python -u fl_trainer.py 0 > trainer0.log &
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
+python fl_master.py > ${log_dir}/master.log 2>&1 &
 sleep 2
-python -u fl_trainer.py 1 > trainer1.log &
+python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
+sleep 5
+python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
+sleep 2
+for ((i=0;i<2;i++))
+do
+    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
+    sleep 2
+done
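This script now guards its expensive setup step: program_saver.py runs only when the load_file directory it produces is missing, and the gru4rec demo applies the same guard to its dataset download. A minimal sketch of that idempotent-setup pattern, with placeholder names (prepared_data, prepare.sh) standing in for the real ones:

```sh
# Run the preparation step only if its output directory does not exist yet.
if [ ! -d prepared_data ]; then
    sh prepare.sh
fi
```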
python/paddle_fl/paddle_fl/examples/gru4rec_demo/fl_trainer.py

@@ -20,6 +20,8 @@ import numpy as np
 import sys
 import os
 import logging
+import time
 logging.basicConfig(filename="test.log",
                     filemode="w",

@@ -43,10 +45,9 @@ r = Gru4rec_Reader()
 train_reader = r.reader(train_file_dir, place, batch_size=125)
 output_folder = "model_node4"
-step_i = 0
+epoch_i = 0
 while not trainer.stop():
-    step_i += 1
-    print("batch %d start train" % (step_i))
+    epoch_i += 1
     train_step = 0
     for data in train_reader():
         #print(np.array(data['src_wordseq']))

@@ -56,10 +57,10 @@ while not trainer.stop():
         break
     avg_ppl = np.exp(ret_avg_cost[0])
     newest_ppl = np.mean(avg_ppl)
-    print("ppl:%.3f" % (newest_ppl))
-    save_dir = (output_folder + "/epoch_%d") % step_i
+    print("{} Epoch {} start train, train_step {}, ppl {}".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), epoch_i, train_step, newest_ppl))
+    save_dir = (output_folder + "/epoch_%d") % epoch_i
     if trainer_id == 0:
         print("start save")
         trainer.save_inference_program(save_dir)
-    if step_i >= 40:
+    if epoch_i >= 5:
         break
python/paddle_fl/paddle_fl/examples/gru4rec_demo/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
-python fl_master.py
+ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
+if [ ! -d mid_data ]; then
+    sh download.sh
+fi
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
+python fl_master.py > ${log_dir}/master.log 2>&1 &
 sleep 2
-python -u fl_scheduler.py > scheduler.log &
-python -u fl_server.py > server0.log &
+python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
+sleep 5
+python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
 sleep 2
-python -u fl_trainer.py 0 > trainer0.log &
-sleep 2
-python -u fl_trainer.py 1 > trainer1.log &
-sleep 2
-python -u fl_trainer.py 2 > trainer2.log &
-sleep 2
-python -u fl_trainer.py 3 > trainer3.log &
+for ((i=0;i<4;i++))
+do
+    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
+    sleep 2
+done
python/paddle_fl/paddle_fl/examples/secagg_demo/fl_trainer.py

@@ -84,21 +84,16 @@ def train_test(train_test_program, train_test_feed, train_test_reader):
 # for test
 while not trainer.stop():
     epoch_id += 1
-    print("epoch %d start train" % (epoch_id))
     for data in train_reader():
         step_i += 1
         trainer.step_id = step_i
         accuracy, = trainer.run(feed=feeder.feed(data),
                                 fetch=["accuracy_0.tmp_0"])
         if step_i % 100 == 0:
-            print("Epoch: {0}, step: {1}, accuracy: {2}".format(
+            print("{} Epoch {} start train, step: {}, accuracy: {}".format(
+                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
                 epoch_id, step_i, accuracy[0]))
-    print(step_i)
     avg_loss_val, acc_val = train_test(train_test_program=test_program,
                                        train_test_reader=test_reader,

@@ -106,7 +101,7 @@ while not trainer.stop():
     print("Test with Epoch %d, avg_cost: %s, acc: %s" % (epoch_id, avg_loss_val, acc_val))
-    if epoch_id > 40:
+    if epoch_id > 5:
         break
     if epoch_id % 5 == 0:
         trainer.save_inference_program(output_folder)
python/paddle_fl/paddle_fl/examples/secagg_demo/run.sh

 #!/bin/bash
 unset http_proxy
 unset https_proxy
+ps -ef | grep -E fl_ | grep -v grep | awk '{print $2}' | xargs kill -9
-if [ ! -d log ]; then
-    mkdir log
-fi
-python fl_master.py
+log_dir=${1:-"logs"}
+mkdir -p ${log_dir}
+python fl_master.py > ${log_dir}/master.log 2>&1 &
 sleep 2
-python -u fl_server.py > log/server0.log &
+python -u fl_scheduler.py > ${log_dir}/scheduler.log 2>&1 &
+sleep 5
+python -u fl_server.py > ${log_dir}/server0.log 2>&1 &
 sleep 2
-python -u fl_scheduler.py > log/scheduler.log &
-sleep 2
-python -u fl_trainer.py 0 > log/trainer0.log &
-sleep 2
-python -u fl_trainer.py 1 > log/trainer1.log &
+for ((i=0;i<2;i++))
+do
+    python -u fl_trainer.py $i > ${log_dir}/trainer$i.log 2>&1 &
+    sleep 2
+done
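After any of these scripts has been started, the components run in the background and everything they print goes to the chosen log directory, so progress is checked by tailing the log files rather than watching the terminal. Assuming the default "logs" directory:

```sh
# Follow the first trainer's output; substitute the directory passed to run.sh if one was given.
tail -f logs/trainer0.log
```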