BaiXuePrincess / Paddle
Forked from PaddlePaddle / Paddle
Commit 32e05b01
Authored Nov 12, 2018 by JiabinYang

test=develop

Parent: c8801e10
Showing 4 changed files with 40 additions and 31 deletions.
paddle/fluid/operators/hierarchical_sigmoid_op.h         +9  -0
paddle/fluid/operators/math/matrix_bit_code.h            +1  -1
python/paddle/fluid/tests/unittests/op_test.py           +2  -5
python/paddle/fluid/tests/unittests/test_hsigmoid_op.py  +28 -25
paddle/fluid/operators/hierarchical_sigmoid_op.h
...
@@ -86,6 +86,7 @@ class HierarchicalSigmoidOpKernel : public framework::OpKernel<T> {
     trans(ctx.template device_context<DeviceContext>(), pre_out_data,
           pre_out_data + pre_out->numel(), pre_out_data,
           ClipFunctor<T>(static_cast<T>(-40.0), static_cast<T>(40.0)));
+    pre_out_mat = -1 * pre_out_mat;
     bit_code->Sum(*pre_out, out, static_cast<T>(-1));
     // use softrelu to calculate cross entropy
     pre_out_mat.device(place) = (static_cast<T>(1.0) + pre_out_mat.exp()).log();
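In short, the forward kernel after this change clips the pre-activations, negates them, and then applies softrelu, log(1 + exp(x)), to obtain the cross-entropy terms. A minimal NumPy sketch of that sequence (names are illustrative, not the operator's API):

import numpy as np

def hsigmoid_preout_sketch(pre_out):
    # clip to [-40, 40] for numerical stability (ClipFunctor above)
    pre_out = np.clip(pre_out, -40.0, 40.0)
    # the newly added negation of the pre-activations
    pre_out = -1 * pre_out
    # softrelu, used here to compute the cross-entropy terms
    return np.log(1.0 + np.exp(pre_out))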
...
@@ -146,6 +147,7 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
     auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
     auto pre_out_grad_mat = EigenMatrix<T>::From(pre_out_grad);
     auto out_grad_mat = EigenMatrix<T>::From(*out_grad);
     Eigen::array<int, 2> bcast({{1, static_cast<int>(pre_out_grad.dims()[1])}});
     // softrelu derivative
...
@@ -160,9 +162,16 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
       bias_grad->mutable_data<T>(ctx.GetPlace());
       zero(dev_ctx, bias_grad, static_cast<T>(0.0));
       bit_code->AddGrad(pre_out_grad, bias_grad);
+      auto bias_grad_mat = EigenMatrix<T>::From(*bias_grad);
+      bias_grad_mat = -1 * bias_grad_mat;
     }
     bit_code->MulGradWeight(pre_out_grad, w_grad, *in);
     bit_code->MulGradError(pre_out_grad, *w, in_grad);
+    auto w_grad_mat = EigenMatrix<T>::From(*w_grad);
+    auto in_grad_mat = EigenMatrix<T>::From(*in_grad);
+    w_grad_mat = -1 * w_grad_mat;
+    in_grad_mat = -1 * in_grad_mat;
   }
 };
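The backward changes mirror the forward negation: after AddGrad, MulGradWeight and MulGradError have accumulated the gradients, each result is flipped in sign so that it stays consistent with the negated pre_out. A hedged NumPy sketch of the "softrelu derivative" step plus the sign flips (assuming pre_out holds the clipped, negated pre-activations; the helper name is illustrative):

import numpy as np

def hsigmoid_grad_sketch(pre_out, out_grad, bias_grad, w_grad, in_grad):
    # softrelu derivative: d/dx log(1 + exp(x)) = sigmoid(x); out_grad of
    # shape (N, 1) broadcasts across the code-length dimension of pre_out
    pre_out_grad = out_grad * (1.0 / (1.0 + np.exp(-pre_out)))
    # the added lines: negate every accumulated gradient
    return pre_out_grad, -1 * bias_grad, -1 * w_grad, -1 * in_grad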
...
...
paddle/fluid/operators/math/matrix_bit_code.h
...
@@ -157,7 +157,7 @@ class CustomCode : public Code {
   int get_length() const {
     int length = 0;
-    for (int i = 0; i < ptable_->dims()[1]; i++) {
+    for (int i = 0; i < static_cast<int>(ptable_->dims()[1]); i++) {
       if (ptable_->data<R>()[index_ * static_cast<int>(ptable_->dims()[1]) +
                              i] != -1) {
         length++;
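The loop above counts how many entries of a ptable row are in use before the -1 padding; the new static_cast merely keeps the loop comparison in signed int. An equivalent Python sketch (ptable as a NumPy array with -1 marking unused slots):

import numpy as np

def code_length(ptable, index):
    # count the entries of row `index` that are not the -1 padding value
    length = 0
    for ele in ptable[index]:
        if ele != -1:
            length += 1
    return length

ptable = np.array([(0, 2, -1, -1, -1), (0, 1, 3, -1, -1)])
assert code_length(ptable, 0) == 2 and code_length(ptable, 1) == 3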
...
...
python/paddle/fluid/tests/unittests/op_test.py
...
@@ -138,11 +138,8 @@ class OpTest(unittest.TestCase):
         cls.dtype = "float32"
         cls.outputs = {}
-        # np.random.seed(123)
-        # random.seed(124)
-        np.random.seed(190)
-        random.seed(200)
+        np.random.seed(123)
+        random.seed(124)

     @classmethod
     def tearDownClass(cls):
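Moving the module-level seeds from 190/200 back to 123/124 pins the random data every OpTest subclass draws, so check_output and check_grad failures reproduce run to run. A minimal illustration of the effect (standalone, not the OpTest harness itself):

import random
import numpy as np

np.random.seed(123)
random.seed(124)
a = np.random.random((4, 8)).astype("float32")

np.random.seed(123)
random.seed(124)
b = np.random.random((4, 8)).astype("float32")

assert (a == b).all()  # identical inputs on every seeded run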
...
...
python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
...
@@ -17,6 +17,9 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import math
+# import paddle.fluid as fluid
+# import paddle.fluid.core as core
+# from op_builder import OpBuilder
 from op_test import OpTest

 np.random.seed(100)
...
@@ -51,7 +54,7 @@ class CodeTableWithCustomTree(object):
     def get_length(self):
         length = 0
-        for ele in self.ptable_[self.index_]:
+        for ele in self.ptable_[self.index_]:  # find the first -1 to stop trace
             if ele >= 0:
                 length = length + 1
...
@@ -71,12 +74,10 @@ def hsigmoid(x, w, label, bias, num_classes):
     pre_sum = np.zeros((batch_size, 1))
     out = np.zeros((batch_size, 1)).astype("float32")
     for i in range(batch_size):
-        #print("\n leaf {leaf}: \n".format(leaf = label[i]))
         code_table = CodeTable(num_classes, label[i])
         length = code_table.get_length()
         for j in range(length):
             idx = code_table.cal_index(j)
-            #print("index {index} ".format(index = j))
             pre_output[i][j] += bias[0][idx]
     for i in range(batch_size):
         code_table = CodeTable(num_classes, label[i])
...
@@ -87,13 +88,12 @@ def hsigmoid(x, w, label, bias, num_classes):
     # clip[-40.0, 40.0]
     pre_output = np.clip(pre_output, -40.0, 40.0)
     # out(i, 0) = \sum_j bit(i, j) * preout(i, j)
+    pre_output = -1 * pre_output
     for i in range(batch_size):
-        #print("\n leaf {leaf}: \n".format(leaf = label[i]))
         code_table = CodeTable(num_classes, label[i])
         length = code_table.get_length()
         sum = 0.0
         for j in range(length):
-            #print("bit {bit} ".format(bit = code_table.cal_bit(j)))
             if code_table.cal_bit(j):
                 sum += pre_output[i][j]
         out[i] = -1.0 * sum
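The loop above forms out(i, 0) = -1.0 * sum_j bit(i, j) * preout(i, j); a vectorized NumPy equivalent, with a 0/1 matrix bits standing in for code_table.cal_bit (illustrative, not part of the test file):

import numpy as np

def bit_weighted_sum(pre_output, bits):
    # bits[i, j] plays the role of code_table.cal_bit(j) for sample i
    return -1.0 * (bits * pre_output).sum(axis=1, keepdims=True)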
...
@@ -108,6 +108,7 @@ def hsigmoidWithCustomTree(x, w, ptable, pcode, label, bias, num_classes):
     batch_size = x.shape[0]
     code_length = len(ptable[0])
     code_table = [0 for _ in range(code_length)]
+    # init pre_out with shape [N, code_length]
     pre_output = np.zeros((batch_size, code_length))
     pre_sum = np.zeros((batch_size, 1))
     out = np.zeros((batch_size, 1)).astype("float32")
...
@@ -125,6 +126,7 @@ def hsigmoidWithCustomTree(x, w, ptable, pcode, label, bias, num_classes):
             pre_output[i][j] += np.dot(w[idx], x[i])
     # clip[-40.0, 40.0]
     pre_output = np.clip(pre_output, -40.0, 40.0)
+    pre_output = -1 * pre_output
     # out(i, 0) = \sum_j bit(i, j) * preout(i, j)
     for i in range(batch_size):
         code_table = CodeTableWithCustomTree(ptable, pcode, i)
...
@@ -141,26 +143,27 @@ def hsigmoidWithCustomTree(x, w, ptable, pcode, label, bias, num_classes):
     return pre_output, out


-# class TestHSigmoidOp(OpTest):
-#     def setUp(self):
-#         self.op_type = "hierarchical_sigmoid"
-#         num_classes = 6
-#         feature_size = 8
-#         batch_size = 7
-#         x = np.random.random((batch_size, feature_size)).astype("float32")
-#         w = np.random.random((num_classes - 1, feature_size)).astype("float32")
-#         label = np.random.randint(0, num_classes, (batch_size, 1))
-#         bias = np.random.random((1, num_classes - 1)).astype("float32")
-#         self.attrs = {'num_classes': num_classes}
-#         self.inputs = {'X': x, 'W': w, 'Label': label, 'Bias': bias}
-#         pre_output, out = hsigmoid(x, w, label, bias, num_classes)
-#         self.outputs = {'PreOut': pre_output, 'Out': out}
+class TestHSigmoidOp(OpTest):
+    def setUp(self):
+        self.op_type = "hierarchical_sigmoid"
+        num_classes = 6
+        feature_size = 8
+        batch_size = 4
+        x = np.random.random((batch_size, feature_size)).astype("float32") * 2
+        w = np.random.random(
+            (num_classes - 1, feature_size)).astype("float32") * 2
+        label = np.random.randint(0, num_classes, (batch_size, 1))
+        bias = np.random.random((1, num_classes - 1)).astype("float32")
+        self.attrs = {'num_classes': num_classes}
+        self.inputs = {'X': x, 'W': w, 'Label': label, 'Bias': bias}
+        pre_output, out = hsigmoid(x, w, label, bias, num_classes)
+        self.outputs = {'PreOut': pre_output, 'Out': out}

-#    def test_check_output(self):
-#        self.check_output()
+    def test_check_output(self):
+        self.check_output()

-#    def test_check_grad(self):
-#        self.check_grad(['Bias', 'X', 'W'], ['Out'], no_grad_set=set('Label'))
+    def test_check_grad(self):
+        self.check_grad(['Bias', 'X', 'W'], ['Out'], no_grad_set=set('Label'))


 class TestHSigmoidOpWithCostumTree(OpTest):
...
@@ -169,9 +172,9 @@ class TestHSigmoidOpWithCostumTree(OpTest):
         num_classes = 6  #using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample
         feature_size = 8
         batch_size = 4
-        x = np.random.random((batch_size, feature_size)).astype("float32") * 10
+        x = np.random.random((batch_size, feature_size)).astype("float32") * 2
         w = np.random.random(
-            (num_classes - 1, feature_size)).astype("float32") * 10
+            (num_classes - 1, feature_size)).astype("float32") * 2
         label = np.array([0, 1, 4, 5])
         ptable = np.array(
             [(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1),
...