Commit 63ae7e62
Authored Sep 16, 2020 by He, Kai
Parent: 57c82ab5

    add mpc operator add, move mean_normalize to ml.py

Showing 6 changed files with 114 additions and 123 deletions.
Changed files:

- core/paddlefl_mpc/mpc_protocol/aby3_operators.h (+14, -7)
- core/paddlefl_mpc/mpc_protocol/mpc_operators.h (+4, -0)
- core/paddlefl_mpc/operators/mpc_mean_normalize_op.h (+2, -2)
- python/paddle_fl/mpc/layers/__init__.py (+0, -3)
- python/paddle_fl/mpc/layers/data_preprocessing.py (+0, -107)
- python/paddle_fl/mpc/layers/ml.py (+94, -4)
core/paddlefl_mpc/mpc_protocol/aby3_operators.h

```cpp
@@ -319,17 +319,24 @@ public:
        auto a_tuple = from_tensor(in);
        auto a_ = std::get<0>(a_tuple).get();

        auto out_tuple = from_tensor(out);
        auto out_ = std::get<0>(out_tuple).get();

        if (pos_info) {
            auto b_tuple = from_tensor<BoolTensor>(pos_info);
            auto b_ = std::get<0>(b_tuple).get();

            a_->max_pooling(out_, b_);
        } else {
            a_->max_pooling(out_, nullptr);
        }
    }

    void max(const Tensor *in, Tensor *out) override {

        auto a_tuple = from_tensor(in);
        auto a_ = std::get<0>(a_tuple).get();

        auto out_tuple = from_tensor(out);
        auto out_ = std::get<0>(out_tuple).get();

        a_->max_pooling(out_, nullptr);
    }

    void inverse_square_root(const Tensor *in, Tensor *out) override {
```
core/paddlefl_mpc/mpc_protocol/mpc_operators.h

```diff
@@ -82,6 +82,10 @@ public:
     // for filter in other shape, reshape input first
     virtual void max_pooling(const Tensor *in, Tensor *out, Tensor *pos_info) {}
 
+    // column wise max
+    // in shape [n, ...], out shape [1, ...]
+    virtual void max(const Tensor *in, Tensor *out) {}
+
     virtual void inverse_square_root(const Tensor *in, Tensor *out) = 0;
 
     virtual void predicts_to_indices(const Tensor *in,
```
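The interface comment pins down the shape contract of the new operator: a column-wise max reduces an input of shape [n, ...] to shape [1, ...]. As a plaintext point of reference only (NumPy, not PaddleFL code), the reduction it describes looks like this:

```python
import numpy as np

# Column-wise max with the shape contract declared in mpc_operators.h:
# input shape [n, ...] -> output shape [1, ...]
def column_wise_max(x):
    return np.max(x, axis=0, keepdims=True)

x = np.array([[1.0, 5.0, -2.0],
              [0.5, 7.0, -3.0]])
print(column_wise_max(x))  # [[ 1.  7. -2.]]
```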
core/paddlefl_mpc/operators/mpc_mean_normalize_op.h

```diff
@@ -54,10 +54,10 @@ class MpcMeanNormalizationKernel : public MpcOpKernel<T> {
             ->mpc_operators()->neg(min, &neg_min);
 
         mpc::MpcInstance::mpc_instance()->mpc_protocol()
-            ->mpc_operators()->max_pooling(&neg_min, &neg_min_global, nullptr);
+            ->mpc_operators()->max(&neg_min, &neg_min_global);
 
         mpc::MpcInstance::mpc_instance()->mpc_protocol()
-            ->mpc_operators()->max_pooling(max, &max_global, nullptr);
+            ->mpc_operators()->max(max, &max_global);
 
         range->mutable_data<T>(
             framework::make_ddim({share_num, 1, feat_num}),
             context.GetPlace(), 0);
```
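The kernel has no secure column-wise min, so it derives the global minimum from the new max operator via the identity min(x) = -max(-x): it negates the per-party minima with neg, reduces them with max, and keeps working with the negated global minimum. A plaintext NumPy sketch of that identity (illustration only, not PaddleFL code):

```python
import numpy as np

# Plaintext sketch of how the kernel obtains a global (column-wise) minimum
# when only a secure column-wise max is available: min(x) = -max(-x).
local_min = np.array([[1.0, 5.0, -2.0],    # party 0's per-feature minima
                      [0.5, 7.0, -3.0]])   # party 1's per-feature minima

neg_min_global = np.max(-local_min, axis=0)  # column-wise max of the negations
min_global = -neg_min_global                 # [ 0.5  5.  -3. ]

assert np.allclose(min_global, np.min(local_min, axis=0))
```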
python/paddle_fl/mpc/layers/__init__.py

```diff
@@ -37,8 +37,6 @@ from . import rnn
 from .rnn import *
 from . import metric_op
 from .metric_op import *
-from . import data_preprocessing
-from .data_preprocessing import *
 
 __all__ = []
 __all__ += basic.__all__
@@ -48,4 +46,3 @@ __all__ += ml.__all__
 __all__ += compare.__all__
 __all__ += conv.__all__
 __all__ += metric_op.__all__
-__all__ += data_preprocessing.__all__
```
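Because the package-level `__all__` still pulls in `ml.__all__`, and `mean_normalize` is added to `ml.__all__` below, code that imports the layer through `paddle_fl.mpc.layers` should keep working after the move. A hypothetical check, assuming the package also does `from .ml import *` as it does for the other layer modules:

```python
# Hypothetical import check after this commit (assumes `from .ml import *`
# in python/paddle_fl/mpc/layers/__init__.py, mirroring the other modules).
from paddle_fl.mpc.layers import mean_normalize           # package-level name
from paddle_fl.mpc.layers.ml import mean_normalize as ml_mean_normalize

# Both names should resolve to the same function, now defined in ml.py.
assert mean_normalize is ml_mean_normalize
```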
python/paddle_fl/mpc/layers/data_preprocessing.py (deleted, 100644 → 0)

```python
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mpc data preprocessing op layers.
"""

from paddle.fluid.data_feeder import check_type, check_dtype
from ..framework import check_mpc_variable_and_dtype
from ..mpc_layer_helper import MpcLayerHelper
from .math import reduce_sum

__all__ = ['mean_normalize']


def mean_normalize(f_min, f_max, f_mean, sample_num):
    '''
    Mean normalization is a method used to normalize the range of independent
    variables or features of data.
    Refer to:
    https://en.wikipedia.org/wiki/Feature_scaling#Mean_normalization

    Args:
        f_min (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local min feature val of N features.
        f_max (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local max feature val of N features.
        f_mean (Variable): A 2-D tensor with shape [P, N], where P is the party
                           num and N is the feature num. Each row contains the
                           local mean feature val of N features.
        sample_num (Variable): A 1-D tensor with shape [P], where P is the
                               party num. Each element contains the sample num
                               of party_i.

    Returns:
        f_range (Variable): A 1-D tensor with shape [N], where N is the
                            feature num. Each element contains the global
                            range of feature_i.
        f_mean_out (Variable): A 1-D tensor with shape [N], where N is the
                               feature num. Each element contains the global
                               mean of feature_i.

    Examples:
        .. code-block:: python

            import paddle_fl.mpc as pfl_mpc

            pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)

            # 2 for share, 4 for 4 party, 100 for feat_num
            input_size = [2, 4, 100]

            mi = pfl_mpc.data(name='mi', shape=input_size, dtype='int64')
            ma = pfl_mpc.data(name='ma', shape=input_size, dtype='int64')
            me = pfl_mpc.data(name='me', shape=input_size, dtype='int64')
            sn = pfl_mpc.data(name='sn', shape=input_size[:-1], dtype='int64')

            out0, out1 = pfl_mpc.layers.mean_normalize(f_min=mi, f_max=ma,
                                                       f_mean=me, sample_num=sn)

            exe = fluid.Executor(place=fluid.CPUPlace())

            # feed encrypted data
            f_range, f_mean = exe.run(feed={'mi': f_min, 'ma': f_max,
                'me': f_mean, 'sn': sample_num}, fetch_list=[out0, out1])
    '''
    helper = MpcLayerHelper("mean_normalize", **locals())

    # dtype = helper.input_dtype()
    dtype = 'int64'

    check_dtype(dtype, 'f_min', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_max', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_mean', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'sample_num', ['int64'], 'mean_normalize')

    f_range = helper.create_mpc_variable_for_type_inference(dtype=f_min.dtype)
    f_mean_out = helper.create_mpc_variable_for_type_inference(dtype=f_min.dtype)

    total_num = reduce_sum(sample_num)

    op_type = 'mean_normalize'

    helper.append_op(
        type='mpc_' + op_type,
        inputs={
            "Min": f_min,
            "Max": f_max,
            "Mean": f_mean,
            "SampleNum": sample_num,
            "TotalNum": total_num,
        },
        outputs={
            "Range": f_range,
            "MeanOut": f_mean_out,
        },
    )

    return f_range, f_mean_out
```
python/paddle_fl/mpc/layers/ml.py

```diff
@@ -37,6 +37,7 @@ __all__ = [
     'pool2d',
     'batch_norm',
     'reshape',
+    'mean_normalize',
 ]
```

```python
@@ -680,3 +681,92 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
            "XShape": x_shape
        })

    return helper.append_activation(out)


def mean_normalize(f_min, f_max, f_mean, sample_num):
    '''
    Mean normalization is a method used to normalize the range of independent
    variables or features of data.
    Refer to:
    https://en.wikipedia.org/wiki/Feature_scaling#Mean_normalization

    Args:
        f_min (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local min feature val of N features.
        f_max (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local max feature val of N features.
        f_mean (Variable): A 2-D tensor with shape [P, N], where P is the party
                           num and N is the feature num. Each row contains the
                           local mean feature val of N features.
        sample_num (Variable): A 1-D tensor with shape [P], where P is the
                               party num. Each element contains the sample num
                               of party_i.

    Returns:
        f_range (Variable): A 1-D tensor with shape [N], where N is the
                            feature num. Each element contains the global
                            range of feature_i.
        f_mean_out (Variable): A 1-D tensor with shape [N], where N is the
                               feature num. Each element contains the global
                               mean of feature_i.

    Examples:
        .. code-block:: python

            import paddle_fl.mpc as pfl_mpc

            pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)

            # 2 for share, 4 for 4 party, 100 for feat_num
            input_size = [2, 4, 100]

            mi = pfl_mpc.data(name='mi', shape=input_size, dtype='int64')
            ma = pfl_mpc.data(name='ma', shape=input_size, dtype='int64')
            me = pfl_mpc.data(name='me', shape=input_size, dtype='int64')
            sn = pfl_mpc.data(name='sn', shape=input_size[:-1], dtype='int64')

            out0, out1 = pfl_mpc.layers.mean_normalize(f_min=mi, f_max=ma,
                                                       f_mean=me, sample_num=sn)

            exe = fluid.Executor(place=fluid.CPUPlace())

            # feed encrypted data
            f_range, f_mean = exe.run(feed={'mi': f_min, 'ma': f_max,
                'me': f_mean, 'sn': sample_num}, fetch_list=[out0, out1])
    '''
    helper = MpcLayerHelper("mean_normalize", **locals())

    # dtype = helper.input_dtype()
    dtype = 'int64'

    check_dtype(dtype, 'f_min', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_max', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_mean', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'sample_num', ['int64'], 'mean_normalize')

    f_range = helper.create_mpc_variable_for_type_inference(dtype=f_min.dtype)
    f_mean_out = helper.create_mpc_variable_for_type_inference(dtype=f_min.dtype)

    # to avoid circular dependencies
    from .math import reduce_sum

    total_num = reduce_sum(sample_num)

    op_type = 'mean_normalize'

    helper.append_op(
        type='mpc_' + op_type,
        inputs={
            "Min": f_min,
            "Max": f_max,
            "Mean": f_mean,
            "SampleNum": sample_num,
            "TotalNum": total_num,
        },
        outputs={
            "Range": f_range,
            "MeanOut": f_mean_out,
        },
    )

    return f_range, f_mean_out
```
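For intuition about what the relocated layer computes, the sketch below is a plaintext NumPy counterpart of the aggregation that `mpc_mean_normalize` appears to perform over the per-party statistics, inferred from the op's Min/Max/Mean/SampleNum/TotalNum inputs and the docstring above. The real op runs on ABY3 shares, so treat this as an assumed reference, not PaddleFL code:

```python
import numpy as np

def mean_normalize_plaintext(f_min, f_max, f_mean, sample_num):
    """Assumed plaintext counterpart of pfl_mpc.layers.mean_normalize.

    f_min, f_max, f_mean: shape [P, N] per-party feature statistics.
    sample_num:           shape [P]    per-party sample counts.
    Returns the per-feature global range and the sample-weighted global mean.
    """
    total_num = sample_num.sum()
    f_range = f_max.max(axis=0) - f_min.min(axis=0)                   # shape [N]
    f_mean_out = (f_mean * sample_num[:, None]).sum(axis=0) / total_num
    return f_range, f_mean_out

f_min = np.array([[0.0, 1.0], [0.5, 0.2]])
f_max = np.array([[9.0, 4.0], [7.0, 6.0]])
f_mean = np.array([[4.0, 2.0], [3.0, 3.0]])
sample_num = np.array([100, 300])

print(mean_normalize_plaintext(f_min, f_max, f_mean, sample_num))
# (array([9. , 5.8]), array([3.25, 2.75]))
```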