PaddlePaddle/PaddleFL commit 193e4010
Authored August 27, 2020 by jhjiangcs

Merge branch 'master' of https://github.com/PaddlePaddle/PaddleFL into smc612

Parents: 0b003bc4, 541e9119
Showing 4 changed files with 155 additions and 27 deletions:

  CMakeLists.txt                          +6    -0
  core/privc3/fixedpoint_tensor.h         +3    -3
  core/privc3/fixedpoint_tensor_imp.h     +24   -24
  core/privc3/fixedpoint_tensor_test.cc   +122  -0
CMakeLists.txt

```diff
@@ -61,6 +61,8 @@ option(USE_AES_NI "Compile with AES NI" ON)
 option(USE_OPENMP "Compile with OpenMP" ON)
+option(USE_ABY3_TRUNC1 "Compile with ABY3 truncate 1 algorithm" OFF)
 
 ########################### the project build part ###############################
 message(STATUS "Using paddlepaddle installation of ${paddle_version}")
 message(STATUS "paddlepaddle include directory: ${PADDLE_INCLUDE}")
@@ -84,6 +86,10 @@ if (USE_OPENMP)
     find_package(OpenMP REQUIRED)
 endif(USE_OPENMP)
 
+if (USE_ABY3_TRUNC1)
+    add_compile_definitions(USE_ABY3_TRUNC1)
+endif(USE_ABY3_TRUNC1)
+
 add_subdirectory(core/privc3)
 add_subdirectory(core/paddlefl_mpc/mpc_protocol)
 add_subdirectory(core/paddlefl_mpc/operators)
```
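With the option left at its default `OFF`, builds compile the new `truncate3` path gated in `core/privc3/fixedpoint_tensor_imp.h` below; turning it on restores the original ABY3 truncate-1 algorithm. Assuming a standard out-of-source CMake invocation (the exact command is not part of this commit), that looks like `cmake -DUSE_ABY3_TRUNC1=ON ..`.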
core/privc3/fixedpoint_tensor.h

```diff
@@ -191,6 +191,9 @@ public:
     void max_pooling(FixedPointTensor* ret,
                      BooleanTensor<T>* pos = nullptr) const;
 
+    static void truncate(const FixedPointTensor* op, FixedPointTensor* ret,
+                         size_t scaling_factor);
+
 private:
     static inline std::shared_ptr<CircuitContext> aby3_ctx() {
@@ -201,9 +204,6 @@ private:
         return paddle::mpc::ContextHolder::tensor_factory();
     }
 
-    static void truncate(const FixedPointTensor* op, FixedPointTensor* ret,
-                         size_t scaling_factor);
-
     template<typename MulFunc>
     static void mul_trunc(const FixedPointTensor<T, N>* lhs,
                           const FixedPointTensor<T, N>* rhs,
```
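This hunk moves `truncate` from the private section to the public API; its fixed-point role is unchanged. A `FixedPointTensor<int64_t, N>` stores values scaled by 2^N, so the product of two such tensors carries a 2^(2N) scale and must be shifted back by `scaling_factor` bits, which is what `truncate` does share-wise. A minimal plaintext sketch of that arithmetic (ordinary C++ on public values, no secret sharing; the variable names are illustrative, not from the repo):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int N = 16;                         // same role as Fix64N16's scaling factor
    int64_t a = (int64_t)(1.5  * (1 << N));   // 1.5 encoded as a * 2^16
    int64_t b = (int64_t)(2.25 * (1 << N));   // 2.25 encoded as b * 2^16
    int64_t prod = a * b;                     // scaled by 2^(2N) after multiplication
    int64_t res  = prod >> N;                 // truncate by N to restore the 2^N scale
    printf("%.4f\n", res / (double)(1 << N)); // prints 3.3750
    return 0;
}
```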
core/privc3/fixedpoint_tensor_imp.h

```diff
@@ -21,7 +21,6 @@
 #include "prng.h"
 
 namespace aby3 {
-
 template<typename T, size_t N>
 FixedPointTensor<T, N>::FixedPointTensor(TensorAdapter<T>* share_tensor[2]) {
     // TODO: check tensors' shapes
@@ -166,6 +165,7 @@ void FixedPointTensor<T, N>::mul(const FixedPointTensor<T, N>* rhs,
     mul_trunc(this, rhs, ret, &TensorAdapter<T>::mul);
 }
 
+#ifdef USE_ABY3_TRUNC1 //use aby3 trunc1
 template<typename T, size_t N>
 void FixedPointTensor<T, N>::truncate(const FixedPointTensor<T, N>* op,
                                       FixedPointTensor<T, N>* ret,
@@ -208,7 +208,20 @@ void FixedPointTensor<T, N>::truncate(const FixedPointTensor<T, N>* op,
     return;
 }
 
-// Protocol. `truncate3`
+#else // use truncate3
+// Protocol. `truncate3` (illustrated for data type T = int64_t)
+// motivation:
+// truncation in aby3 may cause an msb error with small probability;
+// the reason is that before the rshift op, the masked value, e.g. x' - r', may overflow in int64_t,
+// so in `truncate3` we limit r' to (-2^62, 2^62) to avoid the problem.
+// notice:
+// when r' is constrained to (-2^62, 2^62),
+// the SD (statistical distance) of x' - r' between this
+// and r' in Z_{2^64} is equal to |X| / (2^63 + |X|)
+// detail protocol:
 // P2 randomly generates r' \in (-2^62, 2^62), randomly generates r'_0, r_0, r_1 in Z_{2^64},
 // P2 compute r'_1 = r' - r'_0, r_2 = r'/2^N - r_0 - r_1, let x2 = r_2
 // P2 send r_0, r'_0 to P0, send r_1, r'_1 to P1
@@ -217,7 +230,7 @@ void FixedPointTensor<T, N>::truncate(const FixedPointTensor<T, N>* op,
 // P0 set x0 = r_0
 // P0, P1, P2 invoke reshare() with inputs x0, x1, x2 respectively.
 template<typename T, size_t N>
-void FixedPointTensor<T, N>::truncate3(const FixedPointTensor<T, N>* op,
+void FixedPointTensor<T, N>::truncate(const FixedPointTensor<T, N>* op,
                                        FixedPointTensor<T, N>* ret,
                                        size_t scaling_factor) {
     if (scaling_factor == 0) {
@@ -231,23 +244,9 @@ void FixedPointTensor<T, N>::truncate3(const FixedPointTensor<T, N>* op,
         temp.emplace_back(
                 tensor_factory()->template create<T>(op->shape()));
     }
-    // r', contraint in (-2^62, 2^62)
-    // notice : when r' is contrainted in (-2^62, 2^62),
-    // the SD (statistical distance) of x - r' between this
-    // and r' in Z_{2^64} is equal to |X| / (2^63 + |X|)
-    // according to http://yuyu.hk/files/ho2.pdf
+    // r'
     aby3_ctx()->template gen_random_private(*temp[0]);
-    int64_t contraint_upper = ~((uint64_t) 1 << 62);
-    int64_t contraint_low = (uint64_t) 1 << 62;
-    std::for_each(temp[0]->data(), temp[0]->data() + temp[0]->numel(),
-                  [&contraint_upper, &contraint_low] (T& a) {
-                      // contraint -2^62 < a < 2^62
-                      if (a >= 0) {
-                          a &= contraint_upper;
-                      } else {
-                          a |= contraint_low;
-                      }
-                  });
+    temp[0]->rshift(1, temp[0].get());
 
     //r'_0, r'_1
     aby3_ctx()->template gen_random_private(*temp[1]);
@@ -307,6 +306,7 @@ void FixedPointTensor<T, N>::truncate3(const FixedPointTensor<T, N>* op,
     tensor_carry_in->scaling_factor() = N;
     ret->add(tensor_carry_in.get(), ret);
 }
+#endif //USE_ABY3_TRUNC1
 
 template<typename T, size_t N>
 template<typename MulFunc>
@@ -345,7 +345,7 @@ void FixedPointTensor<T, N>::mul_trunc(const FixedPointTensor<T, N>* lhs,
     temp->copy(ret_no_trunc->_share[0]);
     reshare(temp.get(), ret_no_trunc->_share[1]);
 
-    truncate3(ret_no_trunc.get(), ret, N);
+    truncate(ret_no_trunc.get(), ret, N);
 }
 
 template<typename T, size_t N>
@@ -360,7 +360,7 @@ void FixedPointTensor<T, N>::mul(const TensorAdapter<T>* rhs,
     _share[0]->mul(rhs, temp->_share[0]);
     _share[1]->mul(rhs, temp->_share[1]);
-    truncate3(temp.get(), ret, rhs->scaling_factor());
+    truncate(temp.get(), ret, rhs->scaling_factor());
 }
 
 template<typename T, size_t N>
@@ -404,7 +404,7 @@ void FixedPointTensor<T, N>::mat_mul(const TensorAdapter<T>* rhs,
                                      FixedPointTensor<T, N>* ret) const {
     _share[0]->mat_mul(rhs, ret->_share[0]);
     _share[1]->mat_mul(rhs, ret->_share[1]);
-    truncate3(ret, ret, rhs->scaling_factor());
+    truncate(ret, ret, rhs->scaling_factor());
 }
 
 template<typename T, size_t N>
@@ -831,7 +831,7 @@ void FixedPointTensor<T, N>::long_div(const FixedPointTensor<T, N>* rhs,
     }
 
     for (size_t i = 1; i <= N; ++i) {
-        truncate3(&abs_rhs, &sub_rhs, i);
+        truncate(&abs_rhs, &sub_rhs, i);
         abs_lhs.gt(&sub_rhs, &cmp_res);
         cmp_res.mul(&sub_rhs, &sub_rhs);
         cmp_res.lshift(N - i, &cmp_res);
@@ -1184,7 +1184,7 @@ void FixedPointTensor<T, N>::inverse_square_root(const FixedPointTensor* op,
     std::shared_ptr<FixedPointTensor<T, N>> x2 =
             std::make_shared<FixedPointTensor<T, N>>(temp[2].get(), temp[3].get());
     // x2 = 0.5 * op
-    truncate3(op, x2.get(), 1);
+    truncate(op, x2.get(), 1);
 
     assign_to_tensor(y->mutable_share(0), (T)(x0 * pow(2, N)));
     assign_to_tensor(y->mutable_share(1), (T)(x0 * pow(2, N)));
```
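The overflow described in the motivation comment can be reproduced with ordinary integers. The sketch below uses 2-out-of-2 additive sharing over Z_{2^64} instead of ABY3's replicated 3-party sharing, so it only demonstrates the wrap-around mechanism that `truncate3` guards against, not the protocol itself:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int d = 16;                       // scaling factor, as in Fix64N16
    int64_t x = 6LL << d;                   // plaintext 6 in fixed point

    // Additive sharing over Z_{2^64}: x = x0 + x1 (mod 2^64).
    // A mask near the top of the ring makes x - r wrap around.
    uint64_t r  = (1ULL << 63) + 12345;
    uint64_t x0 = (uint64_t)x - r;          // wraps modulo 2^64
    uint64_t x1 = r;

    // Naive truncation: each party arithmetic-shifts its own share
    // (assumes the usual arithmetic >> on signed values).
    int64_t bad = ((int64_t)x0 >> d) + ((int64_t)x1 >> d);
    printf("unconstrained mask: %lld (expected 6)\n", (long long)bad);

    // Constraining the mask to (-2^62, 2^62), as truncate3 does for r',
    // prevents the wrap-around; the same local shifts are now correct.
    uint64_t r2  = (1ULL << 61) + 12345;
    uint64_t x0b = (uint64_t)x - r2;        // no wrap: |x - r2| < 2^63
    int64_t ok = ((int64_t)x0b >> d) + ((int64_t)r2 >> d);
    printf("constrained mask:   %lld (6 up to +-1 rounding)\n", (long long)ok);
    return 0;
}
```

The new `truncate1_msb_incorrect` test below constructs exactly this failure with replicated shares.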
core/privc3/fixedpoint_tensor_test.cc

```diff
@@ -1267,6 +1267,7 @@ TEST_F(FixedTensorTest, mulfixed) {
     EXPECT_TRUE(test_fixedt_check_tensor_eq(out0.get(), &result));
 }
 
+#ifndef USE_ABY3_TRUNC1 //use aby3 trunc1
 TEST_F(FixedTensorTest, mulfixed_multi_times) {
     std::vector<size_t> shape = {100000, 1};
@@ -1327,6 +1328,7 @@ TEST_F(FixedTensorTest, mulfixed_multi_times) {
     EXPECT_TRUE(test_fixedt_check_tensor_eq(out1.get(), out2.get()));
     EXPECT_TRUE(test_fixedt_check_tensor_eq(out0.get(), &result));
 }
+#endif
 
 TEST_F(FixedTensorTest, mulfixed_overflow) {
@@ -3435,4 +3437,124 @@ TEST_F(FixedTensorTest, inv_sqrt_test) {
 }
 
+#ifdef USE_ABY3_TRUNC1 //use aby3 trunc1
+TEST_F(FixedTensorTest, truncate1_msb_incorrect) {
+    std::vector<size_t> shape = { 1 };
+    std::shared_ptr<TensorAdapter<int64_t>> sl[3] = { gen(shape), gen(shape), gen(shape) };
+    std::shared_ptr<TensorAdapter<int64_t>> sout[6] = { gen(shape), gen(shape), gen(shape),
+                                                        gen(shape), gen(shape), gen(shape) };
+    // lhs = 6 = 1 + 2 + 3, share before truncate
+    // zero share 0 = (1 << 62) + (1 << 62) - (1 << 63)
+    sl[0]->data()[0] = ((int64_t) 3 << 32) - ((uint64_t) 1 << 63);
+    sl[1]->data()[0] = ((int64_t) 2 << 32) + ((int64_t) 1 << 62);
+    sl[2]->data()[0] = ((int64_t) 1 << 32) + ((int64_t) 1 << 62);
+    auto pr = gen(shape);
+    // rhs = 6
+    pr->data()[0] = 6 << 16;
+    pr->scaling_factor() = 16;
+    Fix64N16 fl0(sl[0].get(), sl[1].get());
+    Fix64N16 fl1(sl[1].get(), sl[2].get());
+    Fix64N16 fl2(sl[2].get(), sl[0].get());
+    Fix64N16 fout0(sout[0].get(), sout[1].get());
+    Fix64N16 fout1(sout[2].get(), sout[3].get());
+    Fix64N16 fout2(sout[4].get(), sout[5].get());
+    auto p = gen(shape);
+    _t[0] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[0], [&]() {
+                Fix64N16::truncate(&fl0, &fout0, 16);
+                fout0.reveal_to_one(0, p.get());
+            });
+    });
+    _t[1] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[1], [&]() {
+                Fix64N16::truncate(&fl1, &fout1, 16);
+                fout1.reveal_to_one(0, nullptr);
+            });
+    });
+    _t[2] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[2], [&]() {
+                Fix64N16::truncate(&fl2, &fout2, 16);
+                fout2.reveal_to_one(0, nullptr);
+            });
+    });
+    for (auto& t : _t) {
+        t.join();
+    }
+    // failed: result is not close to 6
+    EXPECT_GT(std::abs((p->data()[0] >> 16) - 6), 1000);
+}
+#else
+TEST_F(FixedTensorTest, truncate3_msb_correct) {
+    std::vector<size_t> shape = { 1 };
+    std::shared_ptr<TensorAdapter<int64_t>> sl[3] = { gen(shape), gen(shape), gen(shape) };
+    std::shared_ptr<TensorAdapter<int64_t>> sout[6] = { gen(shape), gen(shape), gen(shape),
+                                                        gen(shape), gen(shape), gen(shape) };
+    // lhs = 6 = 1 + 2 + 3, share before truncate
+    // zero share 0 = (1 << 62) + (1 << 62) - (1 << 63)
+    sl[0]->data()[0] = ((int64_t) 3 << 32) - ((uint64_t) 1 << 63);
+    sl[1]->data()[0] = ((int64_t) 2 << 32) + ((int64_t) 1 << 62);
+    sl[2]->data()[0] = ((int64_t) 1 << 32) + ((int64_t) 1 << 62);
+    auto pr = gen(shape);
+    // rhs = 6
+    pr->data()[0] = 6 << 16;
+    pr->scaling_factor() = 16;
+    Fix64N16 fl0(sl[0].get(), sl[1].get());
+    Fix64N16 fl1(sl[1].get(), sl[2].get());
+    Fix64N16 fl2(sl[2].get(), sl[0].get());
+    Fix64N16 fout0(sout[0].get(), sout[1].get());
+    Fix64N16 fout1(sout[2].get(), sout[3].get());
+    Fix64N16 fout2(sout[4].get(), sout[5].get());
+    auto p = gen(shape);
+    _t[0] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[0], [&]() {
+                Fix64N16::truncate(&fl0, &fout0, 16);
+                fout0.reveal_to_one(0, p.get());
+            });
+    });
+    _t[1] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[1], [&]() {
+                Fix64N16::truncate(&fl1, &fout1, 16);
+                fout1.reveal_to_one(0, nullptr);
+            });
+    });
+    _t[2] = std::thread([&] () {
+        g_ctx_holder::template run_with_context(
+            _exec_ctx.get(), _mpc_ctx[2], [&]() {
+                Fix64N16::truncate(&fl2, &fout2, 16);
+                fout2.reveal_to_one(0, nullptr);
+            });
+    });
+    for (auto& t : _t) {
+        t.join();
+    }
+    EXPECT_EQ((p->data()[0] >> 16), 6);
+}
+#endif
 
 } // namespace aby3
```
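A note on the hard-coded shares: the three values sum to 6 * 2^32 because the two 2^62 terms and the -(2^63) term cancel modulo 2^64, and truncating by 16 bits leaves 6 * 2^16, so `p->data()[0] >> 16` should reveal 6. That arithmetic can be checked standalone (plain C++, no MPC context):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // The share values from truncate3_msb_correct, summed locally.
    uint64_t s0 = ((uint64_t)3 << 32) - ((uint64_t)1 << 63);
    uint64_t s1 = ((uint64_t)2 << 32) + ((uint64_t)1 << 62);
    uint64_t s2 = ((uint64_t)1 << 32) + ((uint64_t)1 << 62);
    uint64_t x  = s0 + s1 + s2;             // masks cancel modulo 2^64
    printf("x         = %llu\n", (unsigned long long)x);          // 6 << 32
    printf("x >> 16   = %llu\n", (unsigned long long)(x >> 16));  // 6 << 16
    printf("plaintext = %llu\n", (unsigned long long)(x >> 32));  // 6
    return 0;
}
```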