Commit 3d195298
Authored Aug 26, 2020 by jhjiangcs

fix conflict bugs.

Parent 383137c8

Showing 9 changed files with 23 additions and 1569 deletions (+23 / -1569)
core/paddlefl_mpc/data_utils/data_utils.cc        +0   -15
core/paddlefl_mpc/operators/mpc_relu_op.cc        +0   -14
core/paddlefl_mpc/operators/mpc_relu_op.h         +0   -9
core/privc3/circuit_context.h                     +0   -81
core/privc3/fixedpoint_tensor.h                   +0   -169
core/privc3/fixedpoint_tensor_imp.h               +0   -355
core/privc3/fixedpoint_tensor_test.cc             +23  -918
python/paddle_fl/mpc/layers/ml.py                 +0   -4
python/paddle_fl/mpc/mpc_layer_helper.py          +0   -4
core/paddlefl_mpc/data_utils/data_utils.cc

<<<<<<< HEAD
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
...
@@ -13,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
=======
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
#include <atomic>
#include <set>
#include <string>
...
@@ -90,7 +87,6 @@ PYBIND11_MODULE(mpc_data_utils, m)
{
    // optional module docstring
    m.doc() = "pybind11 paddle-mpc plugin: data_utils (share, reveal, psi)";
<<<<<<< HEAD
    m.def("share", &share<long long, paddle::mpc::ABY3_SCALING_FACTOR>,
          "split plaintext into three shares.");
...
@@ -100,17 +96,6 @@ PYBIND11_MODULE(mpc_data_utils, m)
    m.def("send_psi", &send_psi, "Send input in two party PSI.");
    m.def("recv_psi", &recv_psi,
          "Send input and return PSI result as output in two party PSI.");
=======
    m.def("share", &share<long long, paddle::mpc::ABY3_SCALING_FACTOR>,
          "split plaintext into three shares.");
    m.def("reveal", &reveal<long long, paddle::mpc::ABY3_SCALING_FACTOR>,
          "combine three shares to reveal plaintext.");
    m.def("send_psi", &send_psi, "Send input in two party PSI.");
    m.def("recv_psi", &recv_psi,
          "Send input and return PSI result as output in two party PSI.");
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    m.attr("mpc_one_share") = (1 << paddle::mpc::ABY3_SCALING_FACTOR) / 3;
}
...
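Reviewer note: the bindings above only expose names and docstrings — `share` "splits plaintext into three shares", `reveal` "combines three shares to reveal plaintext", and `mpc_one_share` is `(1 << ABY3_SCALING_FACTOR) / 3`, i.e. one third of the fixed-point encoding of 1. The sketch below is a minimal stand-alone illustration of that additive fixed-point sharing, not the PaddleFL implementation; the scaling-factor value 16 and the use of `std::rand()` are assumptions made only for illustration (the real ABY3 code uses keyed PRNGs and replicated shares).

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    // Assumed stand-in for paddle::mpc::ABY3_SCALING_FACTOR (value not shown in this diff).
    constexpr unsigned kScalingFactor = 16;

    // Encode/decode a double as a fixed-point integer with kScalingFactor fractional bits.
    int64_t encode(double x) { return static_cast<int64_t>(x * (1LL << kScalingFactor)); }
    double decode(int64_t v) { return static_cast<double>(v) / (1LL << kScalingFactor); }

    // "split plaintext into three shares": two random shares, the third fixes the sum (mod 2^64).
    void share(double plaintext, uint64_t out[3]) {
        out[0] = static_cast<uint64_t>(std::rand());
        out[1] = static_cast<uint64_t>(std::rand());
        out[2] = static_cast<uint64_t>(encode(plaintext)) - out[0] - out[1];
    }

    // "combine three shares to reveal plaintext": add the shares and decode.
    double reveal(const uint64_t in[3]) {
        return decode(static_cast<int64_t>(in[0] + in[1] + in[2]));
    }

    int main() {
        uint64_t s[3];
        share(3.25, s);
        std::cout << reveal(s) << "\n";  // prints 3.25
    }

With factor 16 the constant 1.0 is stored as 65536, so each party's share of `mpc_one_share` is 65536 / 3 = 21845 in integer arithmetic.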
core/paddlefl_mpc/operators/mpc_relu_op.cc
...
@@ -25,11 +25,7 @@ class MpcReluOp : public framework::OperatorWithKernel {
    void InferShape(framework::InferShapeContext* ctx) const override {
        auto in_dims = ctx->GetInputDim("X");
<<<<<<< HEAD
        ctx->SetOutputDim("Out", in_dims);
=======
        ctx->SetOutputDim("Y", in_dims);
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        ctx->SetOutputDim("Derivative", in_dims);
    }
};
...
@@ -39,11 +35,7 @@ class MpcReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
    void Make() override {
        AddInput("X", "The input tensor.");
<<<<<<< HEAD
        AddOutput("Out", "Output of relu_op");
=======
        AddOutput("Y", "Output of relu_op");
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        AddOutput("Derivative", "Derivative of relu_op");
        AddComment(R"DOC(
Mpc Relu Operator.
...
@@ -71,15 +63,9 @@ public:
protected:
    void Apply(GradOpPtr<T> grad) const override {
        grad->SetType("mpc_relu_grad");
<<<<<<< HEAD
        grad->SetInput("Out", this->Output("Out"));
        grad->SetInput("Derivative", this->Output("Derivative"));
        grad->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
=======
        grad->SetInput("Y", this->Output("Y"));
        grad->SetInput("Derivative", this->Output("Derivative"));
        grad->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        grad->SetAttrMap(this->Attrs());
        grad->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    }
...
core/paddlefl_mpc/operators/mpc_relu_op.h
...
@@ -25,11 +25,7 @@ class MpcReluKernel : public MpcOpKernel<T> {
public:
    void ComputeImpl(const framework::ExecutionContext& ctx) const override {
        const Tensor* in_t = ctx.Input<Tensor>("X");
<<<<<<< HEAD
        Tensor* out_t = ctx.Output<Tensor>("Out");
=======
        Tensor* out_t = ctx.Output<Tensor>("Y");
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        Tensor* der_t = ctx.Output<Tensor>("Derivative");
        auto x = in_t->data<T>();
        auto y = out_t->mutable_data<T>(ctx.GetPlace());
...
@@ -45,13 +41,8 @@ template <typename DeviceContext, typename T>
class MpcReluGradKernel : public MpcOpKernel<T> {
public:
    void ComputeImpl(const framework::ExecutionContext& ctx) const override {
<<<<<<< HEAD
        auto* dy_t = ctx.Input<Tensor>(framework::GradVarName("Out"));
        auto* y_t = ctx.Input<Tensor>("Out");
=======
        auto* dy_t = ctx.Input<Tensor>(framework::GradVarName("Y"));
        auto* y_t = ctx.Input<Tensor>("Y");
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        auto* der_t = ctx.Input<Tensor>("Derivative");
        auto* dx_t = ctx.Output<Tensor>(framework::GradVarName("X"));
        auto dx = dx_t->mutable_data<T>(ctx.GetPlace());
...
core/privc3/circuit_context.h
...
@@ -15,7 +15,6 @@
#include <algorithm>
#include <memory>
#include <algorithm>
#include "core/paddlefl_mpc/mpc_protocol/abstract_network.h"
#include "prng_utils.h"
...
@@ -81,85 +80,6 @@ public:
    void set_network(std::shared_ptr<AbstractNetwork> network) {
        _network = network;
    }
<<<<<<< HEAD
    AbstractNetwork* network() {
        return _network.get();
    }

    void set_random_seed(const block& seed, size_t idx) {
        if (idx >= 3) {
            // exception handling
        }
        _prng[idx].set_seed(seed);
    }

    size_t party() const {
        return _party;
    }

    size_t pre_party() const {
        return (_party + 3 - 1) % 3;
    }

    size_t next_party() const {
        return (_party + 1) % 3;
    }

    template <typename T>
    T gen_random(bool next) {
        return _prng[next].get<T>();
    }

    template <typename T, template <typename> class Tensor>
    void gen_random(Tensor<T>& tensor, bool next) {
        std::for_each(tensor.data(), tensor.data() + tensor.numel(),
                      [this, next](T& val) {
                          val = this->template gen_random<T>(next);
                      });
    }

    template <typename T>
    T gen_random_private() {
        return _prng[2].get<T>();
    }

    template <typename T, template <typename> class Tensor>
    void gen_random_private(Tensor<T>& tensor) {
        std::for_each(tensor.data(), tensor.data() + tensor.numel(),
                      [this](T& val) {
                          val = this->template gen_random_private<T>();
                      });
    }

    template <typename T>
    T gen_zero_sharing_arithmetic() {
        return _prng[0].get<T>() - _prng[1].get<T>();
    }

    template <typename T, template <typename> class Tensor>
    void gen_zero_sharing_arithmetic(Tensor<T>& tensor) {
        std::for_each(tensor.data(), tensor.data() + tensor.numel(),
                      [this](T& val) {
                          val = this->template gen_zero_sharing_arithmetic<T>();
                      });
    }

    template <typename T>
    T gen_zero_sharing_boolean() {
        return _prng[0].get<T>() ^ _prng[1].get<T>();
    }

    template <typename T, template <typename> class Tensor>
    void gen_zero_sharing_boolean(Tensor<T>& tensor) {
        std::for_each(tensor.data(), tensor.data() + tensor.numel(),
                      [this](T& val) {
                          val = this->template gen_zero_sharing_boolean<T>();
                      });
    }

    template <typename T, template <typename> class Tensor>
=======
    AbstractNetwork* network() {
        return _network.get();
...
@@ -237,7 +157,6 @@ public:
    }

    template <typename T, template <typename> class Tensor>
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    void ot(size_t sender, size_t receiver, size_t helper,
            const Tensor<T>* choice, const Tensor<T>* m[2],
            Tensor<T>* buffer[2], Tensor<T>* ret) {
...
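Reviewer note: `gen_zero_sharing_arithmetic()` above is just `_prng[0].get<T>() - _prng[1].get<T>()` (the boolean variant XORs instead). For the three local values to form a sharing of zero, each pair of neighbouring parties must draw from a common seeded PRNG; that seed wiring goes through `set_random_seed` and is not part of this hunk, so the pairing in the sketch below is an assumption used only to show the telescoping cancellation.

    #include <cstdint>
    #include <iostream>

    int main() {
        // k[i]: stream shared by party i and party (i + 1) % 3 (stand-ins for PRNG outputs).
        uint64_t k[3] = {0x1111, 0x2222, 0x3333};
        uint64_t share[3];
        for (int i = 0; i < 3; ++i) {
            // party i computes prng[0] - prng[1]: its "next-neighbour" stream minus its "previous-neighbour" stream
            share[i] = k[i] - k[(i + 2) % 3];
        }
        // Each value looks pseudorandom, but the three telescope to zero mod 2^64.
        std::cout << share[0] + share[1] + share[2] << "\n";  // prints 0
    }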
core/privc3/fixedpoint_tensor.h
...
@@ -28,7 +28,6 @@ class FixedPointTensor {
public:
    explicit FixedPointTensor(TensorAdapter<T>* share_tensor[2]);
<<<<<<< HEAD
    explicit FixedPointTensor(TensorAdapter<T>* share_tensor_0,
                              TensorAdapter<T>* share_tensor_1);
...
@@ -162,141 +161,6 @@ public:
             size_t... N1>
    void gt(const CTensor<T, N1...>* rhs, BooleanTensor<T>* ret) const;
=======
    explicit FixedPointTensor(TensorAdapter<T>* share_tensor_0,
                              TensorAdapter<T>* share_tensor_1);

    ~FixedPointTensor() {};

    //get mutable shape of tensor
    TensorAdapter<T>* mutable_share(size_t idx);

    const TensorAdapter<T>* share(size_t idx) const;

    size_t numel() const {
        return _share[0]->numel();
    }

    // reveal fixedpointtensor to one party
    void reveal_to_one(size_t party, TensorAdapter<T>* ret) const;

    // reveal fixedpointtensor to all parties
    void reveal(TensorAdapter<T>* ret) const;

    const std::vector<size_t> shape() const;

    //convert TensorAdapter to shares
    static void share(const TensorAdapter<T>* input,
                      TensorAdapter<T>* output_shares[3],
                      block seed = g_zero_block);

    // element-wise add with FixedPointTensor
    void add(const FixedPointTensor* rhs, FixedPointTensor* ret) const;

    // element-wise add with TensorAdapter
    void add(const TensorAdapter<T>* rhs, FixedPointTensor* ret) const;

    // element-wise sub with FixedPointTensor
    void sub(const FixedPointTensor* rhs, FixedPointTensor* ret) const;

    // element-wise sub with TensorAdapter
    void sub(const TensorAdapter<T>* rhs, FixedPointTensor* ret) const;

    // negative
    void negative(FixedPointTensor* ret) const;

    // element-wise mul with FixedPointTensor using truncate1
    void mul(const FixedPointTensor* rhs, FixedPointTensor* ret) const;

    // element-wise mul with TensorAdapter
    void mul(const TensorAdapter<T>* rhs, FixedPointTensor* ret) const;

    // div by TensorAdapter
    void div(const TensorAdapter<T>* rhs, FixedPointTensor* ret) const;

    // div by FixedPointedTensor
    // TODO@yqy : not surport operator rhs <= 0 now
    void div(const FixedPointTensor* rhs, FixedPointTensor* ret,
             size_t iter = 16, double x0 = pow(2, -15)) const;

    // long div by boolean circuit
    // res_int_len: estimated bit len of the integer part of result
    void long_div(const FixedPointTensor* rhs, FixedPointTensor* ret,
                  size_t res_int_len = 20) const;

    void inverse_square_root(FixedPointTensor* ret,
                             size_t iter = 16, double x0 = 0x1p-10) const;

    // dot_mul
    template<template<typename U, size_t...> class CTensor,
             size_t... N1>
    void dot_mul(const CTensor<T, N1...>* rhs, FixedPointTensor* ret) const;

    //sum all element
    void sum(FixedPointTensor* ret) const;

    // mat_mul with FixedPointTensor
    void mat_mul(const FixedPointTensor* rhs, FixedPointTensor* ret) const;

    // mat_mul with TensorAdapter
    void mat_mul(const TensorAdapter<T>* rhs, FixedPointTensor* ret) const;

    // exp approximate: exp(x) = \lim_{n->inf} (1+x/n)^n
    // where n = 2^ite
    void exp(FixedPointTensor* ret, size_t iter = 8) const;

    // element-wise relu
    void relu(FixedPointTensor* ret) const;

    // element-wise relu with relu'
    void relu_with_derivative(FixedPointTensor* ret,
                              BooleanTensor<T>* derivative) const;

    // element-wise sigmoid using 3 piecewise polynomials
    void sigmoid(FixedPointTensor* ret) const;

    // element-wise sigmoid using 5 pieces polynomial
    // see paper [Privacy-preserving collaborative machine learning
    // on genomic data using TensorFlow]
    void sigmoid_enhanced(FixedPointTensor* ret) const;

    // element-wise sigmoid using Chebyshev polynomial approximation
    // implemented with ref to tfe[https://github.com/tf-encrypted/tf-encrypted]
    void sigmoid_chebyshev(FixedPointTensor* ret) const;

    // softmax axis = -1
    void softmax(FixedPointTensor* ret, bool use_relu = false,
                 bool use_long_div = true) const;

    // element-wise polynomial
    void polynomial(const TensorAdapter<T>* coeff, FixedPointTensor* ret) const;

    // element-wise piecewise polynomial
    void polynomial_piecewise(const TensorAdapter<T>* coeff,
                              const TensorAdapter<T>* break_point,
                              FixedPointTensor* ret) const;

    // element-wise compare
    // <
    template<template<typename U, size_t...> class CTensor,
             size_t... N1>
    void lt(const CTensor<T, N1...>* rhs, BooleanTensor<T>* ret) const;

    // <=
    template<template<typename U, size_t...> class CTensor,
             size_t... N1>
    void leq(const CTensor<T, N1...>* rhs, BooleanTensor<T>* ret) const;

    // >
    template<template<typename U, size_t...> class CTensor,
             size_t... N1>
    void gt(const CTensor<T, N1...>* rhs, BooleanTensor<T>* ret) const;
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    // >=
    template<template<typename U, size_t...> class CTensor,
             size_t... N1>
...
@@ -332,7 +196,6 @@ private:
    static inline std::shared_ptr<CircuitContext> aby3_ctx() {
        return paddle::mpc::ContextHolder::mpc_ctx();
    }
<<<<<<< HEAD
    static inline std::shared_ptr<TensorAdapterFactory> tensor_factory() {
        return paddle::mpc::ContextHolder::tensor_factory();
...
@@ -368,38 +231,6 @@ private:
        return aby3_ctx()->next_party();
    }
=======
    static inline std::shared_ptr<TensorAdapterFactory> tensor_factory() {
        return paddle::mpc::ContextHolder::tensor_factory();
    }

    static void truncate(const FixedPointTensor* op, FixedPointTensor* ret,
                         size_t scaling_factor);

    template<typename MulFunc>
    static void mul_trunc(const FixedPointTensor<T, N>* lhs,
                          const FixedPointTensor<T, N>* rhs,
                          FixedPointTensor<T, N>* ret,
                          MulFunc mul_func);

    // reduce last dim
    static void reduce(FixedPointTensor<T, N>* input,
                       FixedPointTensor<T, N>* ret);

    static size_t party() {
        return aby3_ctx()->party();
    }

    static size_t pre_party() {
        return aby3_ctx()->pre_party();
    }

    static size_t next_party() {
        return aby3_ctx()->next_party();
    }
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    static void reshare(const TensorAdapter<T>* send_val,
                        TensorAdapter<T>* recv_val) {
        if (party() == 0) {
...
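Reviewer note: throughout this header the second template parameter N is the fixed-point scaling factor (see the `truncate(..., size_t scaling_factor)` and `mul_trunc` declarations above). The relation that makes the truncation step necessary, written out:

    \tilde{x} = \operatorname{round}(x \cdot 2^{N}), \qquad
    \tilde{x}\,\tilde{y} \approx x y \cdot 2^{2N}
    \;\Longrightarrow\;
    \operatorname{truncate}(\tilde{x}\tilde{y},\, N)
      = \big\lfloor \tilde{x}\tilde{y} / 2^{N} \big\rfloor
      \approx \operatorname{round}(x y \cdot 2^{N}),

so every shared multiplication is followed by a divide-by-2^N to bring the product back to scale.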
core/privc3/fixedpoint_tensor_imp.h
...
@@ -208,7 +208,6 @@ void FixedPointTensor<T, N>::truncate(const FixedPointTensor<T, N>* op,
        return;
    }
<<<<<<< HEAD
    // Protocol. `truncate3`
    // P2 randomly generates r' \in (-2^62, 2^62), randomly generates r'_0, r_0, r_1 in Z_{2^64},
    // P2 compute r'_1 = r' - r'_0, r_2 = r'/2^N - r_0 - r_1, let x2 = r_2
...
@@ -550,249 +549,6 @@ void FixedPointTensor<T, N>::sigmoid_chebyshev(FixedPointTensor<T, N>* ret) cons
}

template <typename T, size_t N>
=======
template <typename T, size_t N>
template <typename MulFunc>
void FixedPointTensor<T, N>::mul_trunc(const FixedPointTensor<T, N>* lhs,
                                       const FixedPointTensor<T, N>* rhs,
                                       FixedPointTensor<T, N>* ret,
                                       MulFunc mul_func) {

    auto r_zero = tensor_factory()->template create<T>(ret->shape());
    aby3_ctx()->gen_zero_sharing_arithmetic(*r_zero.get());

    // temp = _share[0]->mul(rhs->_share[0]) +
    //        _share[0]->mul(rhs->_share[1]) +
    //        _share[1]->mul(rhs->_share[0]) +
    //        r_zero
    auto temp = tensor_factory()->template create<T>(ret->shape());
    auto temp1 = tensor_factory()->template create<T>(ret->shape());

    // use mul_func to fit both element_wise mul and mat mul
    (lhs->share(0)->*mul_func)(rhs->share(0), temp.get());
    (lhs->share(0)->*mul_func)(rhs->share(1), temp1.get());
    temp1->add(temp.get(), temp1.get());

    (lhs->share(1)->*mul_func)(rhs->share(0), temp.get());
    temp1->add(r_zero.get(), temp1.get());
    temp->add(temp1.get(), temp.get());

    auto temp2 = tensor_factory()->template create<T>(ret->shape());
    auto temp3 = tensor_factory()->template create<T>(ret->shape());

    TensorAdapter<int64_t>* temp_array[2] = {temp2.get(), temp3.get()};

    std::shared_ptr<FixedPointTensor<T, N>> ret_no_trunc =
            std::make_shared<FixedPointTensor<T, N>>(temp_array);

    temp->copy(ret_no_trunc->_share[0]);
    reshare(temp.get(), ret_no_trunc->_share[1]);

    truncate(ret_no_trunc.get(), ret, N);
}
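Reviewer note: the cross-term comment inside `mul_trunc` is the standard 2-out-of-3 replicated-sharing product. Writing x = x_0 + x_1 + x_2 and y = y_0 + y_1 + y_2 (indices mod 3), with party i holding (x_i, x_{i+1}) and a zero-share alpha_i from `gen_zero_sharing_arithmetic`, each party's local value is

    z_i = x_i y_i + x_i y_{i+1} + x_{i+1} y_i + \alpha_i, \qquad
    \sum_{i=0}^{2} z_i
      = \sum_{i,j} x_i y_j + \sum_i \alpha_i
      = \Big(\sum_i x_i\Big)\Big(\sum_j y_j\Big) = x\,y,

after which `reshare` redistributes z_i into replicated form and `truncate(..., N)` rescales the product, exactly as in the code above.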
template <typename T, size_t N>
void FixedPointTensor<T, N>::mul(const TensorAdapter<T>* rhs,
                                 FixedPointTensor<T, N>* ret) const {
    // PADDLE_ENFORCE_EQ(N, rhs->scaling_factor(),
    //                   "no match scaling factor");
    auto temp0 = tensor_factory()->template create<T>(this->shape());
    auto temp1 = tensor_factory()->template create<T>(this->shape());
    std::shared_ptr<FixedPointTensor<T, N>> temp =
            std::make_shared<FixedPointTensor<T, N>>(temp0.get(), temp1.get());

    _share[0]->mul(rhs, temp->_share[0]);
    _share[1]->mul(rhs, temp->_share[1]);
    truncate(temp.get(), ret, rhs->scaling_factor());
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::sum(FixedPointTensor<T, N>* ret) const {
    PADDLE_ENFORCE_EQ(ret->numel(), 1, "output size should be 1.");
    T sum1 = (T) 0;
    T sum2 = (T) 0;
    T* iter_0 = _share[0]->data();
    T* iter_1 = _share[1]->data();
    for (int i = 0; i < this->numel(); ++i) {
        sum1 += *(iter_0 + i);
        sum2 += *(iter_1 + i);
    }
    assign_to_tensor(ret->_share[0], sum1);
    assign_to_tensor(ret->_share[1], sum2);
}

template <typename T, size_t N>
template <template <typename U, size_t...> class CTensor, size_t... N1>
void FixedPointTensor<T, N>::dot_mul(const CTensor<T, N1...>* rhs,
                                     FixedPointTensor<T, N>* ret) const {
    PADDLE_ENFORCE_EQ(ret->numel(), 1, "output size should be 1.");

    auto temp0 = tensor_factory()->template create<T>(this->shape());
    auto temp1 = tensor_factory()->template create<T>(this->shape());
    std::shared_ptr<FixedPointTensor<T, N>> temp =
            std::make_shared<FixedPointTensor<T, N>>(temp0.get(), temp1.get());
    this->mul(rhs, temp.get());
    temp->sum(ret);
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::mat_mul(const FixedPointTensor<T, N>* rhs,
                                     FixedPointTensor<T, N>* ret) const {
    mul_trunc(this, rhs, ret, &TensorAdapter<T>::mat_mul);
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::mat_mul(const TensorAdapter<T>* rhs,
                                     FixedPointTensor<T, N>* ret) const {
    _share[0]->mat_mul(rhs, ret->_share[0]);
    _share[1]->mat_mul(rhs, ret->_share[1]);
    truncate(ret, ret, rhs->scaling_factor());
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::div(const TensorAdapter<T>* rhs,
                                 FixedPointTensor<T, N>* ret) const {
    PADDLE_ENFORCE_EQ(N, rhs->scaling_factor(), "no match scaling factor");

    auto temp = tensor_factory()->template create<T>(this->shape());

    double scale = std::pow(2, rhs->scaling_factor());
    auto inverse = [scale](T d) -> T {
        return 1.0 * scale / d * scale;
    };
    std::transform(rhs->data(), rhs->data() + rhs->numel(),
                   temp->data(), inverse);

    temp->scaling_factor() = rhs->scaling_factor();

    this->mul(temp.get(), ret);
}
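Reviewer note: in `div(const TensorAdapter<T>*, ...)` the `inverse` lambda maps the already-scaled public divisor d to `scale * scale / d`, i.e. the fixed-point encoding of its reciprocal, so the division reduces to the public `mul` overload above. A worked instance with an assumed scaling factor N = 16:

    \text{inverse}(d) = \frac{2^{N} \cdot 2^{N}}{d} = \frac{2^{2N}}{d}, \qquad
    d = 2.0 \cdot 2^{16} = 131072
    \;\Longrightarrow\; \frac{2^{32}}{131072} = 32768 = 0.5 \cdot 2^{16}.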
template <typename T, size_t N>
void FixedPointTensor<T, N>::div(const FixedPointTensor<T, N>* rhs,
                                 FixedPointTensor<T, N>* ret,
                                 size_t iter, double x0) const {
    auto temp0 = tensor_factory()->template create<T>(ret->shape());
    auto temp1 = tensor_factory()->template create<T>(ret->shape());
    std::shared_ptr<FixedPointTensor<T, N>> temp =
            std::make_shared<FixedPointTensor<T, N>>(temp0.get(), temp1.get());
    reciprocal(rhs, temp.get(), iter, x0);
    this->mul(temp.get(), ret);
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::exp(FixedPointTensor<T, N>* ret,
                                 size_t iter) const {
    // exp approximate: exp(x) = \lim_{n->inf} (1+x/n)^n
    // where n = 2^ite
    auto pow_iter = tensor_factory()->template create<T>(this->shape());
    assign_to_tensor(pow_iter.get(), (T) (pow(2, N - iter)));
    pow_iter->scaling_factor() = N;

    auto tensor_one = tensor_factory()->template create<T>(this->shape());
    assign_to_tensor(tensor_one.get(), (T) 1 << N);
    tensor_one->scaling_factor() = N;

    this->mul(pow_iter.get(), ret);
    ret->add(tensor_one.get(), ret);

    for (int i = 0; i < iter; ++i) {
        ret->mul(ret, ret);
    }
}
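Reviewer note: `exp` implements the limit in its comment with n = 2^iter: the input is multiplied by `pow(2, N - iter)` (that is, x / 2^iter at scale 2^N), the encoded constant 1 is added, and the result is squared `iter` times. In formula form:

    \exp(x) \;\approx\; \Big(1 + \frac{x}{2^{k}}\Big)^{2^{k}}, \qquad k = \text{iter} \;(\text{default } 8),

where the 2^k-th power is obtained by the k repeated `ret->mul(ret, ret)` squarings in the loop.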
template <typename T, size_t N>
void FixedPointTensor<T, N>::relu(FixedPointTensor<T, N>* ret) const {
    //utilize polynomial_piecewise
    // break_point = {0}, coeff[0] = {0, 0}, coeff[1] = {0, 1}
    // break_point.shape = {1, this->shape}, coeff.shape = {2, 2, this->shape}
    auto shape_ = shape();

    //construct break_point
    auto b_shape = shape_;
    b_shape.insert(b_shape.begin(), 1);
    auto break_point = tensor_factory()->template create<T>(b_shape);

    T* b_ptr = break_point->data();
    for (size_t i = 0; i < break_point->numel(); ++i) {
        b_ptr[i] = 0;
    }
    break_point->scaling_factor() = N;

    //contruct coeff
    std::vector<size_t> c_shape = {2, 2};
    c_shape.insert(c_shape.end(), shape_.begin(), shape_.end());
    auto coeff = tensor_factory()->template create<T>(c_shape);

    T* c_ptr = coeff->data();
    for (size_t i = 0; i < 3 * this->numel(); ++i) {
        c_ptr[i] = 0;
    }
    for (size_t i = 3 * this->numel(); i < 4 * this->numel(); ++i) {
        c_ptr[i] = (T) 1 << N;
    }
    coeff->scaling_factor() = N;

    this->polynomial_piecewise(coeff.get(), break_point.get(), ret);
}
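Reviewer note: the coefficient layout in `relu` (break_point = {0}, coeff[0] = {0, 0}, coeff[1] = {0, 1}) encodes ReLU as a two-piece degree-1 polynomial:

    \operatorname{relu}(x) =
    \begin{cases}
      0 + 0 \cdot x = 0, & x < 0,\\
      0 + 1 \cdot x = x, & x \ge 0,
    \end{cases}

with the constant 1 stored as (T)1 << N because the coefficients live at scale 2^N.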
template <typename T, size_t N>
void FixedPointTensor<T, N>::relu_with_derivative(
        FixedPointTensor<T, N>* ret, BooleanTensor<T>* derivative) const {

    auto shape_ = shape();
    auto zero = tensor_factory()->template create<T>(shape_);

    assign_to_tensor(zero.get(), (T) 0);
    zero->scaling_factor() = N;

    auto tmp0 = tensor_factory()->template create<T>(shape_);
    auto tmp1 = tensor_factory()->template create<T>(shape_);

    BooleanTensor<T> der(tmp0.get(), tmp1.get());

    gt(zero.get(), &der);
    der.mul(this, ret);

    if (derivative) {
        der.share(0)->copy(derivative->share(0));
        der.share(1)->copy(derivative->share(1));
    }
}

template <typename T, size_t N>
void FixedPointTensor<T, N>::sigmoid_chebyshev(FixedPointTensor<T, N>* ret) const {
    //utilize Chebyshev polynomial approximation
    // more accurate in small range, such as [-4, 4]
    auto shape = ret->shape();
    std::vector<size_t> shape_ = shape;
    shape_.insert(shape_.begin(), 10);
    auto numel = ret->numel();
    auto coeff = tensor_factory()->template create<T>(shape_);
    std::vector<double> w;
    w.resize(10, 0.0f);
    w[0] = 0.5;
    w[1] = 0.2159198015;
    w[3] = -0.0082176259;
    w[5] = 0.0001825597;
    w[7] = -0.0000018848;
    w[9] = 0.0000000072;
    for (int i = 0; i < 10; ++i) {
        for (int j = 0; j < numel; ++j) {
            *(coeff->data() + i * numel + j) = (T) (w[i] * pow(2, N));
        }
    }
    coeff->scaling_factor() = N;
    polynomial(coeff.get(), ret);
}
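Reviewer note: spelled out, the odd-degree coefficients w[] above give the approximation that `polynomial` evaluates:

    \sigma(x) \approx 0.5 + 0.2159198015\,x - 0.0082176259\,x^{3} + 0.0001825597\,x^{5}
                    - 0.0000018848\,x^{7} + 0.0000000072\,x^{9},

which, per the comment, is only intended to be accurate on a small range such as [-4, 4].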
template <typename T, size_t N>
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
void FixedPointTensor<T, N>::sigmoid(FixedPointTensor<T, N>* ret) const {
    //utilize polynomial_piecewise
    // break_point = {-2.5, 2.5}
...
@@ -823,25 +579,6 @@ void FixedPointTensor<T, N>::sigmoid(FixedPointTensor<T, N>* ret) const {
    //contruct coeff
    std::vector<size_t> c_shape = {3, 2};
    c_shape.insert(c_shape.end(), shape_.begin(), shape_.end());
<<<<<<< HEAD
    auto coeff = tensor_factory()->template create<T>(c_shape);

    T* c_ptr = coeff->data();

    size_t numel = this->numel();
    double scale = std::pow(2, N);
    for (size_t i = 0; i < numel; ++i) {
        c_ptr[i] = 0.0001 * scale;
        c_ptr[i + numel] = 0;
        c_ptr[i + 2 * numel] = 0.5 * scale;
        c_ptr[i + 3 * numel] = 0.17 * scale;
        c_ptr[i + 4 * numel] = (1 - 0.0001) * scale;
        c_ptr[i + 5 * numel] = 0;
    }
    coeff->scaling_factor() = N;
=======
    auto coeff = tensor_factory()->template create<T>(c_shape);
...
@@ -859,7 +596,6 @@ void FixedPointTensor<T, N>::sigmoid(FixedPointTensor<T, N>* ret) const {
    }
    coeff->scaling_factor() = N;
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    this->polynomial_piecewise(coeff.get(), break_point.get(), ret);
}
...
...
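Reviewer note: the HEAD side of the `sigmoid` hunk fills the coefficient tensor with (0.0001, 0), (0.5, 0.17) and (1 - 0.0001, 0), which together with break_point = {-2.5, 2.5} is the 3-piece approximation:

    \sigma(x) \approx
    \begin{cases}
      0.0001, & x < -2.5,\\
      0.5 + 0.17\,x, & -2.5 \le x < 2.5,\\
      0.9999, & x \ge 2.5.
    \end{cases}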
@@ -947,41 +683,6 @@ void FixedPointTensor<T, N>::softmax(FixedPointTensor<T, N>* ret,
    temp[8]->reshape({row, col});
    temp[9]->reshape({row, col});
    FixedPointTensor<T, N> max_x_broadcast(temp[8].get(), temp[9].get());
<<<<<<< HEAD
    temp[10]->reshape({row, col});
    auto exp_lower_bound = temp[10].get();

    auto transpose = [](const TensorAdapter<T>* in, TensorAdapter<T>* out) {
        // suppose input dims = 2
        const size_t col = in->shape()[1];
        const size_t row = in->shape()[0];
        const size_t numel = in->numel();
        for (size_t k = 0; k < numel; ++k) {
            size_t i = k / row;
            size_t j = k % row;
            out->data()[k] = in->data()[j * col + i];
        }
    };

    auto broadcast = [](const TensorAdapter<T>* in, TensorAdapter<T>* out) {
        // suppose input dims = 2
        // in shape = [row, 1]
        const size_t col = out->shape()[1];
        const size_t row = out->shape()[0];
        for (size_t k = 0; k < out->numel(); ++k) {
            size_t i = k / col;
            out->data()[k] = in->data()[i];
        }
    };

    share(0)->copy(x.mutable_share(0));
    share(1)->copy(x.mutable_share(1));

    if (use_relu) {
=======
    temp[10]->reshape({row, col});
    auto exp_lower_bound = temp[10].get();
...
@@ -1015,7 +716,6 @@ void FixedPointTensor<T, N>::softmax(FixedPointTensor<T, N>* ret,
    if (use_relu) {
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        x.relu(&x);
    } else {
        // use exp
...
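Reviewer note: the `transpose` helper in the softmax hunk writes out[k] = in[j*col + i] with i = k / row and j = k % row, so for a row-major [row, col] input it produces the [col, row] transpose:

    k = i \cdot \mathit{row} + j,\; 0 \le i < \mathit{col},\; 0 \le j < \mathit{row}
    \;\Longrightarrow\;
    \text{out}[i][j] = \text{in}[j][i],

while `broadcast` does the inverse bookkeeping, replicating a [row, 1] column vector across all `col` output columns.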
@@ -1087,7 +787,6 @@ void FixedPointTensor<T, N>::long_div(const FixedPointTensor<T, N>* rhs,
    assign_to_tensor(cmp_res_all.share(0), (T) 0);
    assign_to_tensor(cmp_res_all.share(1), (T) 0);
<<<<<<< HEAD
    const size_t msb = sizeof(T) * 8 - 1;
    sign_lhs.bit_extract(msb, this);
...
@@ -1121,41 +820,6 @@ void FixedPointTensor<T, N>::long_div(const FixedPointTensor<T, N>* rhs,
        lshift(&abs_rhs, i, &sub_rhs);
=======
    const size_t msb = sizeof(T) * 8 - 1;
    sign_lhs.bit_extract(msb, this);
    sign_rhs.bit_extract(msb, rhs);
    sign_lhs.bitwise_xor(&sign_rhs, &sign_ret);

    auto lshift = [](const FixedPointTensor<T, N>* in,
                     size_t rhs,
                     FixedPointTensor<T, N>* out) {
        in->share(0)->lshift(rhs, out->mutable_share(0));
        in->share(1)->lshift(rhs, out->mutable_share(1));
    };

    // abs = val - 2 * sign * val
    auto abs = [lshift](const FixedPointTensor<T, N>* in,
                        const BooleanTensor<T>* sign,
                        FixedPointTensor<T, N>* out) {
        lshift(in, 1, out);
        sign->mul(out, out);
        in->sub(out, out);
    };

    auto out0 = tensor_factory()->template create<T>(ret->shape());

    abs(this, &sign_lhs, &abs_lhs);
    abs(rhs, &sign_rhs, &abs_rhs);

    for (ssize_t i = int_len - 1; i >= 0; --i) {
        lshift(&abs_rhs, i, &sub_rhs);
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        abs_lhs.gt(&sub_rhs, &cmp_res);
...
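Reviewer note: in `long_div` the sign bits are the MSBs (msb = sizeof(T)*8 - 1) and the `abs` lambda computes val - 2*sign*val, so with a shared sign bit s in {0, 1}:

    \operatorname{abs}(v, s) = v - 2\,s\,v =
    \begin{cases} v, & s = 0,\\ -v, & s = 1, \end{cases}
    \qquad s_{\text{ret}} = s_{\text{lhs}} \oplus s_{\text{rhs}},

i.e. the schoolbook division runs on magnitudes; the XOR-ed sign is presumably applied to the quotient after the visible hunk.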
@@ -1167,11 +831,7 @@ void FixedPointTensor<T, N>::long_div(const FixedPointTensor<T, N>* rhs,
    }

    for (size_t i = 1; i <= N; ++i) {
<<<<<<< HEAD
        truncate3(&abs_rhs, &sub_rhs, i);
=======
        truncate(&abs_rhs, &sub_rhs, i);
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
        abs_lhs.gt(&sub_rhs, &cmp_res);
        cmp_res.mul(&sub_rhs, &sub_rhs);
        cmp_res.lshift(N - i, &cmp_res);
...
@@ -1312,16 +972,6 @@ void FixedPointTensor<T, N>::polynomial_piecewise(
                temp[temp_index++].get()));
        msb[i]->bit_extract(sizeof(T) * 8 - 1, temp1[i].get());
    }
<<<<<<< HEAD
    // b[0] = msb[0], b[i + 1] = ~ msb[i] & msb[i + 1]
    std::vector<std::shared_ptr<BooleanTensor<T>>> b;
    b.emplace_back(std::make_shared<BooleanTensor<T>>(
            temp[temp_index++].get(), temp[temp_index++].get()));
    b[0] = msb[0];
=======
    // b[0] = msb[0], b[i + 1] = ~ msb[i] & msb[i + 1]
    std::vector<std::shared_ptr<BooleanTensor<T>>> b;
...
@@ -1330,7 +980,6 @@ void FixedPointTensor<T, N>::polynomial_piecewise(
            temp[temp_index++].get()));
    b[0] = msb[0];
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    for (int i = 0; i < len_break_point - 1; ++i) {
        b.emplace_back(std::make_shared<BooleanTensor<T>>(
                temp[temp_index++].get(),
...
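Reviewer note: in `polynomial_piecewise`, msb[i] is the extracted sign bit of temp1[i]; assuming temp1[i] holds x - break_point[i] (the subtraction is computed before the visible lines), msb[i] = 1 exactly when x < break_point[i], and the construction in the comment

    b_0 = m_0 = [\,x < bp_0\,], \qquad
    b_{i+1} = \lnot m_i \wedge m_{i+1} = [\,bp_i \le x < bp_{i+1}\,],

yields one-hot interval indicators, so the routine can presumably evaluate each piece's polynomial and blend them with these indicators to select the piece the input falls into.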
@@ -1535,11 +1184,7 @@ void FixedPointTensor<T, N>::inverse_square_root(const FixedPointTensor* op,
    std::shared_ptr<FixedPointTensor<T, N>> x2 =
            std::make_shared<FixedPointTensor<T, N>>(temp[2].get(), temp[3].get());
    // x2 = 0.5 * op
<<<<<<< HEAD
    truncate3(op, x2.get(), 1);
=======
    truncate(op, x2.get(), 1);
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
    assign_to_tensor(y->mutable_share(0), (T) (x0 * pow(2, N)));
    assign_to_tensor(y->mutable_share(1), (T) (x0 * pow(2, N)));
...
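Reviewer note: only the initialization of `inverse_square_root` is visible here — x2 = op / 2 via a 1-bit truncate, and y seeded with the constant x0 (default 0x1p-10 = 2^-10 from the header). The iteration body lies outside the hunk; the textbook Newton update it presumably applies for 1/sqrt(op) is

    y_{k+1} = y_k\,\big(\tfrac{3}{2} - \tfrac{op}{2}\,y_k^{2}\big)
            = y_k\,\big(\tfrac{3}{2} - x_2\,y_k^{2}\big),

which uses only multiplications and additions and therefore maps directly onto the shared fixed-point primitives above.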
core/privc3/fixedpoint_tensor_test.cc
(This diff is collapsed.)
python/paddle_fl/mpc/layers/ml.py
...
@@ -234,11 +234,7 @@ def relu(input, name=None):
        type="mpc_relu",
        inputs={"X": input},
        outputs={
<<<<<<< HEAD
            "Out": out,
=======
            "Y": out,
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
            "Derivative": derivative}
        )
    return out
...
python/paddle_fl/mpc/mpc_layer_helper.py
...
@@ -221,11 +221,7 @@ class MpcLayerHelper(LayerHelper):
        self.append_op(
            type="mpc_" + act_type,
            inputs={"X": [input_var]},
<<<<<<< HEAD
            outputs={"Out": [tmp],
=======
            outputs={"Y": [tmp],
>>>>>>> 5a09665c36ffb7eae2288b3f837d3be18091c259
                     "Derivative": [derivative]},
            attrs=act)
        return tmp
...