Commit f5dfd705
Authored May 11, 2020 by jingqinghe

update mpc document

Parent: bdd8ca20

Showing 1 changed file with 67 additions and 18 deletions

python/paddle_fl/mpc/README.md (+67, -18)
```diff
@@ -127,27 +127,76 @@ role, addr, port = sys.argv[1], sys.argv[2], sys.argv[3]
 # init the MPC environment
 pfl_mpc.init("aby3", int(role), net_server_addr=addr, net_server_port=int(port))

+# data processing
+BATCH_SIZE = 10
+feature_reader = aby3.load_aby3_shares("/tmp/house_feature", id=role, shape=(13,))
+label_reader = aby3.load_aby3_shares("/tmp/house_label", id=role, shape=(1,))
+batch_feature = aby3.batch(feature_reader, BATCH_SIZE, drop_last=True)
+batch_label = aby3.batch(label_reader, BATCH_SIZE, drop_last=True)
+
 # define encrypted variables
-image = pfl_mpc.data(name='image', shape=[None, 784], dtype='int64')
-label = pfl_mpc.data(name='label', shape=[None, 1], dtype='int64')
+x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 13], dtype='int64')
+y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 1], dtype='int64')
+
+# async data loader
+loader = fluid.io.DataLoader.from_generator(feed_list=[x, y], capacity=BATCH_SIZE)
+batch_sample = paddle.reader.compose(batch_feature, batch_label)
+place = fluid.CPUPlace()
+loader.set_batch_generator(batch_sample, places=place)

 # define a secure training network
-hidden = pfl_mpc.layers.fc(input=image, size=100, act='relu')
-prediction = pfl_mpc.layers.fc(input=hidden, size=10, act='softmax')
-cost = pfl_mpc.layers.square_error_cost(input=prediction, label=label)
-loss = pfl_mpc.layers.mean(cost)
-sgd = pfl_mpc.optimizer.SGD(learning_rate=0.001)
-sgd.minimize(loss)
-
-# Place the training on CPU
-exe = fluid.Executor(place=fluid.CPUPlace())
-
-# use random numbers to simulate encrypted data, and start training
-x = numpy.random.random(size=(128, 2, 784)).astype('int64')
-y = numpy.random.random(size=(128, 2, 1)).astype('int64')
-loss_data, = exe.run(feed={'image': x, 'lable': y}, fetch_list=[loss.name])
+y_pre = pfl_mpc.layers.fc(input=x, size=1)
+cost = pfl_mpc.layers.square_error_cost(input=y_pre, label=y)
+avg_loss = pfl_mpc.layers.mean(cost)
+optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.001)
+optimizer.minimize(avg_loss)
+
+# loss file that stores the encrypted loss
+loss_file = "/tmp/uci_loss.part{}".format(role)
+
+# start training
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+epoch_num = 20
+
+start_time = time.time()
+for epoch_id in range(epoch_num):
+    step = 0
+    # feed data via loader
+    for sample in loader():
+        mpc_loss = exe.run(feed=sample, fetch_list=[avg_loss])
+        if step % 50 == 0:
+            print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, mpc_loss))
+            with open(loss_file, 'ab') as f:
+                f.write(np.array(mpc_loss).tostring())
+        step += 1
+end_time = time.time()
+
+# training time
+print('Mpc Training of Epoch={} Batch_size={}, cost time in seconds:{}'.format(epoch_num, BATCH_SIZE, (end_time - start_time)))
+
+# do prediction
+prediction_file = "/tmp/uci_prediction.part{}".format(role)
+for sample in loader():
+    prediction = exe.run(program=infer_program, feed=sample, fetch_list=[y_pre])
+    with open(prediction_file, 'ab') as f:
+        f.write(np.array(prediction).tostring())
+    break
+
+# reveal the loss and prediction
+import prepare_data
+print("uci_loss:")
+prepare_data.load_decrypt_data("/tmp/uci_loss", (1,))
+print("prediction:")
+prepare_data.load_decrypt_data("/tmp/uci_prediction", (BATCH_SIZE,))
```
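The training script above assumes that the ABY3 share files referenced by `/tmp/house_feature` and `/tmp/house_label` already exist, and it reveals the saved loss and prediction through a local `prepare_data` module that is not part of this diff. The sketch below illustrates both sides of that workflow; it is a minimal illustration, assuming the `aby3` data utilities (`make_shares`, `save_aby3_shares`, `load_aby3_shares`, `reconstruct`) behave as in the UCI housing demo shipped with this version of PaddleFL, and the function names here are placeholders rather than the exact helpers used in the repository.

```python
# Sketch only: assumes paddle_fl.mpc.data_utils.aby3 provides make_shares,
# save_aby3_shares, load_aby3_shares and reconstruct, as in the uci_housing demo.
import numpy as np
import paddle
from paddle_fl.mpc.data_utils import aby3


def generate_encrypted_data():
    """Split each plaintext UCI housing sample into three ABY3 secret shares."""
    sample_reader = paddle.dataset.uci_housing.train()

    def encrypted_housing_features():
        for item in sample_reader():
            yield aby3.make_shares(np.array(item[0]))

    def encrypted_housing_labels():
        for item in sample_reader():
            yield aby3.make_shares(np.array(item[1]))

    # Writes one share file per party (e.g. /tmp/house_feature.part0/1/2),
    # which aby3.load_aby3_shares(..., id=role, ...) reads in the script above.
    aby3.save_aby3_shares(encrypted_housing_features, "/tmp/house_feature")
    aby3.save_aby3_shares(encrypted_housing_labels, "/tmp/house_label")


def load_decrypt_data(filepath, shape):
    """Combine the three parties' share files and print the reconstructed plaintext."""
    part_readers = [
        aby3.load_aby3_shares(filepath, id=party_id, shape=shape)
        for party_id in range(3)
    ]
    share_reader = paddle.reader.compose(
        part_readers[0], part_readers[1], part_readers[2])
    for instance in share_reader():
        print(aby3.reconstruct(np.array(instance)))
```

With helpers along these lines, `generate_encrypted_data()` would run once before the three party processes start, and the decryption step only works after all three `.part{0,1,2}` files produced during training are in place.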
#### Execution and results