PaddlePaddle / PaddleFL

Commit cd696e5d
Authored Nov 22, 2019 by qjing666

add femnist_demo

Parent: da0d1ba0
Showing 7 changed files with 208 additions and 0 deletions (+208 −0).
paddle_fl/examples/femnist_demo/download.sh      +2   −0
paddle_fl/examples/femnist_demo/fl_master.py     +43  −0
paddle_fl/examples/femnist_demo/fl_scheduler.py  +9   −0
paddle_fl/examples/femnist_demo/fl_server.py     +13  −0
paddle_fl/examples/femnist_demo/fl_trainer.py    +122 −0
paddle_fl/examples/femnist_demo/run.sh           +14  −0
paddle_fl/examples/femnist_demo/stop.sh          +5   −0
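Taken together, the new files form a self-contained FedAvg demo on the LEAF FEMNIST data: download.sh fetches the dataset, fl_master.py compiles the federated job into fl_job_config, fl_scheduler.py / fl_server.py / fl_trainer.py are the three runtime roles, run.sh launches one scheduler, one server and four trainers, and stop.sh tears everything down.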
paddle_fl/examples/femnist_demo/download.sh (new file, mode 100644)

wget --no-check-certificate https://paddlefl.bj.bcebos.com/leaf/femnist_data.tar.gz
tar -xvf femnist_data.tar.gz
paddle_fl/examples/femnist_demo/fl_master.py (new file, mode 100644)

import paddle.fluid as fluid
import paddle_fl as fl
from paddle_fl.core.master.job_generator import JobGenerator
from paddle_fl.core.strategy.fl_strategy_base import FLStrategyFactory


class Model(object):
    def __init__(self):
        pass

    def cnn(self):
        # Two conv-pool blocks plus a 62-way softmax classifier for 28x28 FEMNIST images.
        self.inputs = fluid.layers.data(
            name='img', shape=[1, 28, 28], dtype="float32")
        self.label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        self.conv_pool_1 = fluid.nets.simple_img_conv_pool(
            input=self.inputs,
            num_filters=20,
            filter_size=5,
            pool_size=2,
            pool_stride=2,
            act='relu')
        self.conv_pool_2 = fluid.nets.simple_img_conv_pool(
            input=self.conv_pool_1,
            num_filters=50,
            filter_size=5,
            pool_size=2,
            pool_stride=2,
            act='relu')
        self.predict = fluid.layers.fc(
            input=self.conv_pool_2, size=62, act='softmax')
        self.cost = fluid.layers.cross_entropy(
            input=self.predict, label=self.label)
        self.accuracy = fluid.layers.accuracy(
            input=self.predict, label=self.label)
        self.loss = fluid.layers.mean(self.cost)
        self.startup_program = fluid.default_startup_program()


model = Model()
model.cnn()

# Compile the model, optimizer and FedAvg strategy into a federated job.
job_generator = JobGenerator()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([model.loss])
job_generator.set_startup_program(model.startup_program)
job_generator.set_infer_feed_and_target_names(
    [model.inputs.name, model.label.name],
    [model.loss.name, model.accuracy.name])

build_strategy = FLStrategyFactory()
build_strategy.fed_avg = True
build_strategy.inner_step = 1
strategy = build_strategy.create_fl_strategy()

endpoints = ["127.0.0.1:8181"]
output = "fl_job_config"
job_generator.generate_fl_job(
    strategy, server_endpoints=endpoints, worker_num=4, output=output)
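Running `python fl_master.py` writes the per-role programs into the fl_job_config directory configured above; fl_server.py and fl_trainer.py below load them back through FLRunTimeJob. As a quick sanity check (not part of the commit), one might list what was generated:

# Hypothetical sanity check: inspect the job artifacts emitted by fl_master.py.
import os

for root, dirs, files in os.walk("fl_job_config"):
    print(root, sorted(files))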
paddle_fl/examples/femnist_demo/fl_scheduler.py (new file, mode 100644)

from paddle_fl.core.scheduler.agent_master import FLScheduler

worker_num = 4
server_num = 1
scheduler = FLScheduler(worker_num, server_num)
scheduler.set_sample_worker_num(4)
scheduler.init_env()
print("init env done.")
scheduler.start_fl_training()
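With set_sample_worker_num(4) equal to worker_num, each FedAvg round should involve all four trainers rather than a sampled subset.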
paddle_fl/examples/femnist_demo/fl_server.py (new file, mode 100644)

import paddle_fl as fl
import paddle.fluid as fluid
from paddle_fl.core.server.fl_server import FLServer
from paddle_fl.core.master.fl_job import FLRunTimeJob

server = FLServer()
server_id = 0
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_server_job(job_path, server_id)
job._scheduler_ep = "127.0.0.1:9091"
server.set_server_job(job)
server._current_ep = "127.0.0.1:8181"
server.start()
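The two endpoints are wired to the rest of the demo: server._current_ep is the 127.0.0.1:8181 address listed in fl_master.py's endpoints, and _scheduler_ep = 127.0.0.1:9091 is the address the FLScheduler from fl_scheduler.py presumably listens on by default (the trainers point at the same scheduler endpoint).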
paddle_fl/examples/femnist_demo/fl_trainer.py (new file, mode 100644)

from paddle_fl.core.trainer.fl_trainer import FLTrainerFactory
from paddle_fl.core.master.fl_job import FLRunTimeJob
import numpy
import sys
import paddle
import paddle.fluid as fluid
import logging
import math
import random
import json

logging.basicConfig(
    filename="test.log",
    filemode="w",
    format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
    datefmt="%d-%m-%Y %H:%M:%S",
    level=logging.DEBUG)

# Load this trainer's program from the job config generated by fl_master.py.
trainer_id = int(sys.argv[1])  # trainer id for each guest
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_trainer_job(job_path, trainer_id)
job._scheduler_ep = "127.0.0.1:9091"
trainer = FLTrainerFactory().create_fl_trainer(job)
trainer._current_ep = "127.0.0.1:{}".format(9000 + trainer_id)
trainer.start()
print(trainer._step)
test_program = trainer._main_program.clone(for_test=True)


def data_generater(trainer_id, inner_step, batch_size, count_by_step):
    # Build train/test generators from this trainer's LEAF FEMNIST json shard.
    train_file = open(
        "./femnist_data/train/all_data_%d_niid_0_keep_0_train_9.json" %
        trainer_id, 'r')
    test_file = open(
        "./femnist_data/test/all_data_%d_niid_0_keep_0_test_9.json" %
        trainer_id, 'r')
    json_train = json.load(train_file)
    json_test = json.load(test_file)
    users = json_train["users"]
    # Randomly pick one user (writer) from this trainer's shard.
    rand = random.randrange(0, len(users))
    cur_user = users[rand]
    print('training using ' + cur_user)

    def train_data():
        train_images = json_train["user_data"][cur_user]['x']
        train_labels = json_train["user_data"][cur_user]['y']
        if count_by_step:
            for i in range(inner_step * batch_size):
                yield train_images[i % (len(train_images))], train_labels[
                    i % (len(train_images))]
        else:
            for i in range(len(train_images)):
                yield train_images[i], train_labels[i]

    def test_data():
        for user in users:
            test_images = json_test['user_data'][user]['x']
            test_labels = json_test['user_data'][user]['y']
            for i in range(len(test_images)):
                yield test_images[i], test_labels[i]

    train_file.close()
    test_file.close()
    return train_data, test_data


img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())


def train_test(train_test_program, train_test_feed, train_test_reader):
    # Run the cloned test program over the test reader and return mean accuracy.
    acc_set = []
    for test_data in train_test_reader():
        acc_np = trainer.exe.run(program=train_test_program,
                                 feed=train_test_feed.feed(test_data),
                                 fetch_list=["accuracy_0.tmp_0"])
        acc_set.append(float(acc_np[0]))
    acc_val_mean = numpy.array(acc_set).mean()
    return acc_val_mean


def compute_privacy_budget(sample_ratio, epsilon, step, delta):
    # Print a rough (E, delta)-DP estimate that grows with the global step count.
    E = 2 * epsilon * math.sqrt(step * sample_ratio)
    print("({0}, {1})-DP".format(E, delta))


epoch_id = 0
step = 0
epoch = 3000
# False: train whole local epochs per round; True: train a fixed number of batches.
count_by_step = False
if count_by_step:
    output_folder = "model_node%d" % trainer_id
else:
    output_folder = "model_node%d_epoch" % trainer_id

while not trainer.stop():
    count = 0
    epoch_id += 1
    if epoch_id > epoch:
        break
    print("epoch %d start train" % (epoch_id))
    train_data, test_data = data_generater(
        trainer_id,
        inner_step=trainer._step,
        batch_size=64,
        count_by_step=count_by_step)
    train_reader = paddle.batch(
        paddle.reader.shuffle(train_data, buf_size=500), batch_size=64)
    test_reader = paddle.batch(test_data, batch_size=64)
    if count_by_step:
        for step_id, data in enumerate(train_reader()):
            acc = trainer.run(feeder.feed(data), fetch=["accuracy_0.tmp_0"])
            step += 1
            count += 1
            print(count)
            if count % trainer._step == 0:
                break
        # print("acc:%.3f" % (acc[0]))
    else:
        trainer.run_with_epoch(
            train_reader, feeder, fetch=["accuracy_0.tmp_0"], num_epoch=1)

    acc_val = train_test(
        train_test_program=test_program,
        train_test_reader=test_reader,
        train_test_feed=feeder)
    print("Test with epoch %d, accuracy: %s" % (epoch_id, acc_val))
    compute_privacy_budget(
        sample_ratio=0.001, epsilon=0.1, step=step, delta=0.00001)

    if trainer_id == 0:
        save_dir = (output_folder + "/epoch_%d") % epoch_id
        trainer.save_inference_program(output_folder)
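Two details of fl_trainer.py are easy to miss: data_generater() assumes each shard under femnist_data/ follows the LEAF json layout, with a "users" list and per-user "x"/"y" arrays, and compute_privacy_budget() reports a budget that grows with the square root of the global step count. A minimal sketch of both, with made-up user names and values:

# Illustrative only (not part of the commit): the json layout data_generater()
# expects, and the arithmetic behind compute_privacy_budget().
import math

fake_shard = {
    "users": ["f0000_14"],               # writer ids (this one is made up)
    "user_data": {
        "f0000_14": {
            "x": [[0.0] * 784],          # one flattened 28x28 grayscale image (assumed layout)
            "y": [3],                    # its label, an int in [0, 62)
        }
    },
}

# compute_privacy_budget(sample_ratio=0.001, epsilon=0.1, step=step, delta=1e-5)
# prints E = 2 * epsilon * sqrt(step * sample_ratio); for example, after 3000 steps:
E = 2 * 0.1 * math.sqrt(3000 * 0.001)
print("({0}, {1})-DP".format(E, 1e-05))  # approximately (0.346, 1e-05)-DP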
paddle_fl/examples/femnist_demo/run.sh (new file, mode 100644)

#killall python
#python fl_master.py
#sleep 2
python -u fl_server.py > log/server0.log &
sleep 2
python -u fl_scheduler.py > scheduler.log &
sleep 2
python -u fl_server.py > server0.log &
sleep 2
for ((i=0;i<4;i++))
do
    python -u fl_trainer.py $i > trainer$i.log &
    sleep 2
done
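run.sh assumes the data and job config already exist: download.sh must have unpacked femnist_data/, and `python fl_master.py` (left commented out at the top of the script) must have produced fl_job_config/ before the scheduler, server and trainers are launched.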
paddle_fl/examples/femnist_demo/stop.sh (new file, mode 100644)

#!/bin/bash
echo "Stop service!"
ps -ef | grep -E "fl" | grep -v grep | awk '{print $2}' | xargs kill -9