机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 1abd3b3a
Authored on Nov 27, 2017 by Yancey1989
Parent: 1971f3ce

    implement forward

Showing 9 changed files with 157 additions and 12 deletions (+157 −12)
paddle/operators/CMakeLists.txt              +3   −1
paddle/operators/hierarchical_sigmoid_op.cc  +5   −1
paddle/operators/hierarchical_sigmoid_op.h   +36  −3
paddle/operators/math/math_function.cc       +2   −0
paddle/operators/math/math_function.cu       +2   −0
paddle/operators/math/math_function.h        +6   −0
paddle/operators/math/math_function_impl.h   +14  −0
paddle/operators/math/matrix_bit_code.cc     +71  −6
paddle/operators/math/matrix_bit_code.h      +18  −1
paddle/operators/CMakeLists.txt

@@ -185,7 +185,8 @@ set(DEPS_OPS
     tensor_array_read_write_op
     gru_op
     adagrad_op
-    sgd_op)
+    sgd_op
+    hierarchical_sigmoid_op)
 op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op)

@@ -203,6 +204,7 @@ op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table)
 op_library(lod_tensor_to_array_op SRCS lod_tensor_to_array_op.cc DEPS lod_rank_table_op)
 op_library(array_to_lod_tensor_op SRCS array_to_lod_tensor_op.cc DEPS lod_rank_table_op)
 op_library(tensor_array_read_write_op SRCS tensor_array_read_write_op.cc)
+op_library(hierarchical_sigmoid_op DEPS matrix_bit_code)
 if(WITH_GPU)
   op_library(nccl_op DEPS nccl_common)
 endif()
paddle/operators/hierarchical_sigmoid_op.cc

@@ -85,12 +85,16 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
              "(TensorArray, required) The input array. Each Tensor has the "
              "same shape with [N * D].")
         .AsDuplicable();
     AddInput("Parameters",
              "(Tensor, required), The parameters of hierarchical "
              "sigmoid operator, each of them is s a 2-D tensor.")
         .AsDuplicable();
     AddInput("Label",
              "(Tensor, required), The labels of training data. It's a"
              "1-D tensor.");
     AddInput("Bias",
              "(Tensor, optional), The bias is a 1-D tensor, "
-             "which is applied to the output");
+             "which is applied to the output.");
     AddOutput("Out",
               "(Tensor, required) The output of hierarchical sigmoid operator.");
paddle/operators/hierarchical_sigmoid_op.h

@@ -14,28 +14,61 @@ limitations under the License. */
 #pragma once
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/math/math_function.h"
 #include "paddle/operators/math/matrix_bit_code.h"

 namespace paddle {
 namespace operators {

 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

 template <typename Place, typename T>
 class HierarchicalSigmoidOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto ins = ctx.MultiInput<framework::Tensor>("X");
     auto params = ctx.MultiInput<framework::Tensor>("Parameters");
     auto* label = ctx.Input<framework::Tensor>("Label");
     auto* bias = ctx.Input<framework::Tensor>("Bias");
     auto* out = ctx.Output<framework::Tensor>("Out");
     size_t num_classes = static_cast<size_t>(ctx.Attr<int>("num_classes"));

     framework::Tensor sum;
     framework::Tensor pre_out;
     auto place = ctx.GetEigenDevice<Place>();
     auto& device_ctx = ctx.device_context();
     math::ColwiseSum<Place, T> col_sum;
     math::RowwiseSum<Place, T> row_sum;
     auto pre_out_mat = EigenMatrix<T>::From(pre_out);

     int64_t batch_size = ins[0]->dims()[0];
     int64_t size = ins.size();
-    framework::Tensor pre_out;
     std::vector<int64_t> pre_out_dims({batch_size, size});
     pre_out.mutable_data<T>(framework::make_ddim(pre_out_dims), ctx.GetPlace());
     std::vector<int64_t> sum_dims({batch_size, 1UL});
     sum.mutable_data<T>(framework::make_ddim(sum_dims), ctx.GetPlace());
     out->mutable_data<T>(ctx.GetPlace());

-    if (bias != NULL) {
+    if (bias) {
       math::AddByBitCode<T>(num_classes, *label, pre_out, *bias);
     }
     for (size_t i = 0; i < ins.size(); ++i) {
       math::MulByBitCode<T>(num_classes, *label, pre_out, *params[i], *ins[i]);
     }
     // clip the matrix with (-40, 40)
     pre_out_mat.device(place) =
         pre_out_mat.abs().cwiseMax(static_cast<T>(40.0));
     math::SumByBitCode<T>(num_classes, *label, *out, pre_out,
                           static_cast<T>(-1));
     // softrelu
     pre_out_mat.device(place) = (static_cast<T>(1) + pre_out_mat.exp()).log();
     row_sum(device_ctx, pre_out, &sum);
     col_sum(device_ctx, *out, &sum);
   }
 };
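For orientation, the quantity this kernel assembles from the bit-code helpers can be written out explicitly. This is a sketch based on the helper comments in matrix_bit_code.h and the usual hierarchical-sigmoid formulation, not text from the commit itself: writing idx(i, j) and bit(i, j) for the node index and branch bit at step j of sample i's code path,

$$\mathrm{pre}(i, j) = \mathrm{bias}\big(\mathrm{idx}(i, j)\big) + \sum_d W\big(\mathrm{idx}(i, j), d\big)\, x(i, d),$$

$$\mathrm{out}(i) \approx \sum_{j < \mathrm{len}(i)} \Big[\log\big(1 + e^{\mathrm{pre}(i, j)}\big) - \mathrm{bit}(i, j)\,\mathrm{pre}(i, j)\Big].$$

Under that reading, AddByBitCode and MulByBitCode build pre(i, j) from the bias and the per-node weight rows, SumByBitCode with scale_sum = −1 contributes the −Σ bit·pre term, and the softrelu followed by the row sum contributes the Σ log(1 + e^pre) term.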
paddle/operators/math/math_function.cc

@@ -314,6 +314,8 @@ template struct RowwiseAdd<platform::CPUPlace, float>;
 template struct RowwiseAdd<platform::CPUPlace, double>;
 template struct ColwiseSum<platform::CPUPlace, float>;
 template struct ColwiseSum<platform::CPUPlace, double>;
+template struct RowwiseSum<platform::CPUPlace, float>;
+template struct RowwiseSum<platform::CPUPlace, double>;

 }  // namespace math
 }  // namespace operators
paddle/operators/math/math_function.cu

@@ -298,6 +298,8 @@ template struct RowwiseAdd<platform::GPUPlace, float>;
 template struct RowwiseAdd<platform::GPUPlace, double>;
 template struct ColwiseSum<platform::GPUPlace, float>;
 template struct ColwiseSum<platform::GPUPlace, double>;
+template struct RowwiseSum<platform::GPUPlace, float>;
+template struct RowwiseSum<platform::GPUPlace, float>;

 }  // namespace math
 }  // namespace operators
paddle/operators/math/math_function.h

@@ -130,6 +130,12 @@ struct ColwiseSum {
                   const framework::Tensor& input, framework::Tensor* vec);
 };

+template <typename Place, typename T>
+struct RowwiseSum {
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor* vec);
+};
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
paddle/operators/math/math_function_impl.h

@@ -78,6 +78,20 @@ void ColwiseSum<Place, T>::operator()(const platform::DeviceContext& context,
       in.sum(Eigen::array<int, 1>({{0}})).reshape(shape);
 }

+template <typename Place, typename T>
+void RowwiseSum<Place, T>::operator()(const platform::DeviceContext& context,
+                                      const framework::Tensor& input,
+                                      framework::Tensor* vector) {
+  auto in_dims = input.dims();
+  auto size = input.numel() / in_dims[1];
+  PADDLE_ENFORCE_EQ(vector->numel(), size);
+
+  auto in = framework::EigenMatrix<T>::From(input);
+  auto vec = framework::EigenMatrix<T>::From(*vector);
+  Eigen::array<int, 2> shape({{static_cast<int>(size), 1}});
+  vec.reshape(shape).device(*context.GetEigenDevice<Place>()) =
+      in.sum(Eigen::array<int, 1>({{0}})).reshape(shape);
+}
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
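The ColwiseSum/RowwiseSum functors above are thin wrappers over Eigen tensor reductions. The following standalone snippet is illustrative only: it uses Eigen's unsupported Tensor module directly rather than Paddle's framework::Tensor, and simply shows the sum-along-a-dimension idiom those functors rely on.

// Illustrative sketch of Eigen's dimension-wise reduction (not Paddle code).
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> m(2, 3);  // a 2 x 3 matrix
  m.setValues({{1, 2, 3}, {4, 5, 6}});

  // Reducing over dimension 0 collapses the rows: one sum per column.
  Eigen::Tensor<float, 1> col_sums = m.sum(Eigen::array<int, 1>({{0}}));
  // Reducing over dimension 1 collapses the columns: one sum per row.
  Eigen::Tensor<float, 1> row_sums = m.sum(Eigen::array<int, 1>({{1}}));

  std::cout << "column sums: " << col_sums << "\n";  // 5 7 9
  std::cout << "row sums:    " << row_sums << "\n";  // 6 15
  return 0;
}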
paddle/operators/math/matrix_bit_code.cc

@@ -53,18 +53,18 @@ namespace math {
 template <class CodeTable, class Op, typename T>
 static void AddByBitCodeT(Op op, CodeTable code_table,
                           const framework::Tensor& codes,
-                          framework::Tensor& a, framework::Tensor& b) {
+                          framework::Tensor& a, const framework::Tensor& b) {
   size_t num_classes = code_table.size();
   size_t max_code_length = code_table.get_max_code_length();
-  size_t num_sample = a.dims()[0].size();
-  size_t width = a.dims()[1].size();
+  size_t num_sample = a.dims()[0];
+  size_t width = a.dims()[1];
   for (size_t i = 0; i < num_sample; ++i) {
-    auto code = code_table(codes.data<T>()[i])
-    int code_length = code.get_length();
+    auto code = code_table(codes.data<T>()[i]);
+    int code_length = code.get_length();
     for (int j = 0; j < code_length; ++j) {
       size_t index = code.calc_index(j);
-      op(a<T>.data()[i * width + j], b<T>.data()[index]);
+      op(a.data<T>()[i * width + j], b.data<T>()[index]);
     }
   }
 }

@@ -79,6 +79,71 @@ void AddByBitCode(size_t num_classes, const framework::Tensor& codes,
   AddByBitCodeT<T>(op, SimpleCodeTable(num_classes), codes, a, b);
 }

+template <class CodeTable, typename T>
+void SumByBitCodeT(CodeTable code_table, const framework::Tensor& codes,
+                   framework::Tensor& tmat, framework::Tensor& sum,
+                   const T& scale_sum) {
+  size_t max_code_length = code_table.get_max_code_length();
+  size_t num_samples = tmat.dims()[0];
+  size_t o_width = tmat.dims()[1];
+  for (size_t i = 0; i < num_samples; ++i) {
+    T sm = 0;
+    auto code = code_table(codes.data<T>()[i]);
+    int code_length = code.get_length();
+    for (int j = 0; j < code_length; ++j) {
+      if (code.calc_bit(j)) {
+        sm += tmat.data<T>()[i * o_width + j];
+      }
+    }
+    sum.data<T>()[i] = scale_sum * sm;
+  }
+}
+
+/* For j < codeLength:
+   sum(i, 0) = \sum_j bit(i, j) * input(i, j)
+*/
+template <typename T>
+void SumByBitCode(size_t num_classes, const framework::Tensor& codes,
+                  framework::Tensor& tmat, framework::Tensor& sum,
+                  T scale_sum) {
+  SumByBitCodeT(SimpleCodeTable(num_classes), codes, tmat, scale_sum);
+}
+
+template <class Op, class CodeTable, typename T>
+void MulByBitCodeT(Op op, CodeTable code_table, const framework::Tensor& codes,
+                   framework::Tensor& tmat, framework::Tensor& weight,
+                   framework::Tensor& input) {
+  size_t num_classes = code_table.size();
+  size_t max_code_length = code_table.get_max_code_length();
+  size_t num_samples = tmat.dims()[0];
+  size_t input_dim = input.dims()[1];
+  size_t o_width = tmat.dims()[1];
+  for (size_t i = 0; i < num_samples; ++i) {
+    auto code = code_table(codes.data<T>()[i]);
+    int code_length = code.get_length();
+    for (int j = 0; j < code_length; ++j) {
+      size_t index = code.calc_index(j);
+      op(tmat.data<T>()[i * o_width + j],
+         weight.data<T>() + index * weight.dims()[1],
+         input.data<T>() + i * input.dims()[1], input_dim);
+    }
+  }
+}
+
+template <typename T>
+void MulByBitCode(size_t num_classes, const framework::Tensor& codes,
+                  framework::Tensor& tmat, const framework::Tensor& weight,
+                  const framework::Tensor& input) {
+  auto op = [](T& t, const T* weight_row, const T* input_row,
+               size_t input_dim) {
+    T sum = 0;
+    for (size_t k = 0; k < input_dim; ++k) {
+      sum += weight_row[k] * input_row[k];
+    }
+    t += sum;
+  };
+  MulByBitCode(op, SimpleCodeTable(num_classes), codes, tmat, weight, input);
+}
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
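The helpers above all iterate over a per-sample "code": the sequence of internal tree nodes and branch bits that leads from the root to a class's leaf, as produced by SimpleCodeTable. The following standalone sketch is a hypothetical re-implementation of that idea for illustration only (ToyCode is not the Paddle SimpleCodeTable source; the exact index/bit conventions are an assumption), showing how one class label expands into (index, bit) pairs.

// Standalone sketch (not Paddle source): a complete-binary-tree code,
// analogous in spirit to SimpleCodeTable / calc_index / calc_bit above.
// For num_classes leaves, class c is placed at heap position c + num_classes;
// walking toward the root yields one internal-node index and one branch bit
// per step.
#include <cstdio>

struct ToyCode {
  unsigned long c_;  // heap position of the leaf for this class
  ToyCode(unsigned long cls, unsigned long num_classes)
      : c_(cls + num_classes) {}
  int get_length() const {
    int len = 0;
    for (unsigned long v = c_; v > 1; v >>= 1) ++len;  // depth of the leaf
    return len;
  }
  // index of the internal node visited at step j (j = 0 is nearest the leaf)
  size_t calc_index(int j) const { return (c_ >> (j + 1)) - 1; }
  // branch taken at step j: 1 = right child, 0 = left child
  bool calc_bit(int j) const { return (c_ >> j) & 1; }
};

int main() {
  const unsigned long num_classes = 6;
  ToyCode code(/*cls=*/3, num_classes);
  for (int j = 0; j < code.get_length(); ++j) {
    std::printf("step %d: node index = %zu, bit = %d\n", j, code.calc_index(j),
                static_cast<int>(code.calc_bit(j)));
  }
  return 0;
}

With this layout, AddByBitCode adds bias(index) to tmat(i, j) at each step, MulByBitCode accumulates a dot product with the weight row of that node, and SumByBitCode sums tmat(i, j) over the steps whose bit is set.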
paddle/operators/math/matrix_bit_code.h

@@ -59,10 +59,27 @@ struct SimpleCodeTable {
   int max_code_length_;
 };

 /* For j < codeLength
    tmat(i, j) += vec(0, index(i, j))
 */
 template <typename T>
 void AddByBitCode(size_t num_classes, const framework::Tensor& codes,
-                  framework::Tensor& a, const framework::Tensor& b);
+                  framework::Tensor& tmat, const framework::Tensor& vec);

+/* For j < codeLength
+   sum(i, 0) = \sum_j bit(i, j) * tmat(i, j)
+*/
+template <typename T>
+void SumByBitCode(size_t num_classes, const framework::Tensor& codes,
+                  framework::Tensor& tmat, framework::Tensor& sum,
+                  T scale_sum);
+
+/* For j < codeLength
+   input.row(i) += tmat(i, j) * weight.row(index(i, j))
+*/
+template <typename T>
+void MulByBitCode(size_t num_classes, const framework::Tensor& codes,
+                  framework::Tensor& tmat, const framework::Tensor& weight,
+                  const framework::Tensor& input);
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle