BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 4c98c2cc
Authored Jan 30, 2019 by xuezhong

remove debug print

Parent: 58ad40cc
Showing 4 changed files with 11 additions and 96 deletions (+11, -96):
paddle/fluid/operators/math/sample_prob.cu   +0  -27
paddle/fluid/operators/sample_logits_op.cc   +10 -33
paddle/fluid/operators/sample_logits_op.cu   +1  -2
paddle/fluid/operators/sample_logits_op.h    +0  -34
paddle/fluid/operators/math/sample_prob.cu

```diff
@@ -112,33 +112,6 @@ int UniqSampler(const Sampler& sampler, const std::size_t num_samples,
   }
   return num_tries;
 }
-/*
-template <typename T>
-void Print(Tensor & t, std::string name) {
-  if (!FLAGS_debug_print) {
-    return;
-  }
-  VLOG(1) << "qxz print " << name;
-  VLOG(1) << name << "size = " << t.numel();
-  size_t size = t.numel();
-  type *d = t.data<type>();
-#ifdef PADDLE_WITH_CUDA
-  std::vector<type> vec;
-  platform::DeviceContextPool::Instance().Get(t.place())->Wait();
-  if (platform::is_gpu_place(t.place())) {
-    vec.resize(size);
-    cudaMemcpy(vec.data(), d, sizeof(T) * size, cudaMemcpyDeviceToHost);
-    d = vec.data();
-  }
-#endif
-  VLOG(1) << name << " data_ptr = " << static_cast<void*>(d);
-  std::string out;
-  for (size_t i = 0; i < size; i++) {
-    out += std::to_string(d[i]);
-    out += ",";
-  }
-  VLOG(1) << out;
-}*/
 template <typename T>
 void GPUSampleWithProb<T>::operator()(
...
```
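The hunk above keeps only the tail of `UniqSampler`, which returns `num_tries`. As a point of reference, sampling without replacement with a try counter can be sketched as below; the `UniformSampler` type and the probability-correction note are illustrative assumptions, not Paddle's actual `Sampler` API:

```cpp
#include <cstddef>
#include <cstdint>
#include <random>
#include <unordered_set>

// Hypothetical stand-in for the Sampler interface used above.
struct UniformSampler {
  std::mt19937_64 rng{42};
  std::uniform_int_distribution<std::int64_t> dist;
  explicit UniformSampler(std::int64_t range) : dist(0, range - 1) {}
  std::int64_t Sample() { return dist(rng); }
};

// Draw num_samples distinct ids into samples[0..num_samples) and return
// how many draws that took. The try count lets a caller estimate the
// probability that a given id landed in the sample set, which is one
// common use of such a counter in candidate sampling.
int UniqSampler(UniformSampler& sampler, std::size_t num_samples,
                std::int64_t* samples) {
  std::unordered_set<std::int64_t> seen;
  int num_tries = 0;
  while (seen.size() < num_samples) {
    ++num_tries;
    const std::int64_t v = sampler.Sample();
    if (seen.insert(v).second) samples[seen.size() - 1] = v;
  }
  return num_tries;
}
```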
paddle/fluid/operators/sample_logits_op.cc

```diff
@@ -64,12 +64,13 @@ class SampleLogitsOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsIntermediate();
     AddOutput("SampledLogits",
               "(Tensor, default: Tensor<float>), A 2-D tensor with shape"
-              "[N x S+NT]. The outputs value of sampled softmax, which will be"
+              "[N x S+NT]. The outputs value of sampled logits, which will be"
               "used in backward calculation.")
         .AsIntermediate();
-    AddOutput("SampledLabel",
-              "(Tensor, default: Tensor<int64>), A 2-D tensor. The cross "
-              "entropy loss with shape [N x NT].");
+    AddOutput("SampledLabel",
+              "(Tensor, default: Tensor<int64>), A 2-D tensor. The sampled label"
+              "with shape [N x S + NT].");
     AddAttr<bool>(
         "use_custom_samples",
         "An indicator whether to use custom samples with probabilities, if True"
...
@@ -81,7 +82,7 @@ class SampleLogitsOpMaker : public framework::OpProtoAndCheckerMaker {
         "An indicator whether to sample non-repetitive negtive labels, if True"
         "the operator will sample negtive labels without replacement."
         "otherwise, the operator will sample negtive labels with replacement.")
-        .SetDefault(false);
+        .SetDefault(true);
     AddAttr<bool>(
         "remove_accidental_hits",
         "An indicator whether to remove accidental hits when samples hits true"
...
@@ -92,35 +93,11 @@ class SampleLogitsOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("seed", "Random seed for generating samples").SetDefault(0);
     AddComment(R"DOC(
-TODO(chenfeiyu): Write documentation for this Operator.
-Sampled Softmax With Cross Entropy Operator.
-
-Cross entropy loss with sampled softmax is used as the output layer extensively.
-This operator computes the softmax normalized values for each row of the input
-tensor, after which cross-entropy loss is computed. This provides a more
-numerically stable gradient.
-
-Because this operator performs a softmax on logits internally, it expects
-unscaled logits. This operator should not be used with the output of
-softmax operator since that would produce incorrect results.
-
-When the attribute soft_label is set false, this operators expects mutually
-exclusive hard labels, each sample in a batch is in exactly one class with a
-probability of 1.0. Each sample in the batch will have a single label.
-
-The equation is as follows:
-
-1) Hard label (one-hot label, so every sample has exactly one class)
-
-$$Loss_j = -\text{Logit}_{Label_j} +
-\log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right),
-j = 1,..., K$$
-
-2) Soft label (each sample can have a distribution over all classes)
-
-$$Loss_j = -\sum_{i=0}^{K}\text{Label}_i \left(\text{Logit}_i -
-\log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right)\right),
-j = 1,...,K$$
+  """
+  Computes sampled output training logits and labels suitable for implementing
+  sampled softmax.
+  """
 )DOC");
   }
...
```
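For reference, the hard-label equation in the removed DOC block, Loss_j = -Logit_{Label_j} + log(sum_i exp(Logit_i)), is ordinary softmax cross entropy. A minimal sketch with the usual max-subtraction for numerical stability:

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Loss = -logit[label] + log(sum_i exp(logit[i])), the hard-label formula
// quoted in the removed DOC text, stabilized by subtracting the max logit.
double SoftmaxCrossEntropy(const std::vector<double>& logits, int label) {
  const double m = *std::max_element(logits.begin(), logits.end());
  double sum = 0.0;
  for (const double x : logits) sum += std::exp(x - m);
  const double log_sum_exp = m + std::log(sum);  // log-sum-exp trick
  return -logits[label] + log_sum_exp;
}
```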
paddle/fluid/operators/sample_logits_op.cu

```diff
@@ -248,8 +248,7 @@ class SampleLogitsGradCUDAKernel : public framework::OpKernel<T> {
   if (!FLAGS_debug_print) {
     return;
   }
-  VLOG(1) << "qxz print " << name;
-  VLOG(1) << name << "size = " << t.numel();
+  VLOG(1) << name << " size = " << t.numel();
   size_t size = t.numel();
-  type* d = t.data<type>();
+  const type* d = t.data<type>();
 #ifdef PADDLE_WITH_CUDA
...
```
paddle/fluid/operators/sample_logits_op.h

```diff
@@ -207,37 +207,6 @@ class SampleLogitsKernel : public framework::OpKernel<T> {
                       num_true);
     }
-    /* Debug
-    const auto num_sampled_classes = samples_dim[1];
-    std::cout << "Sampled Logits" << std::endl;
-    const auto sampled_logits_data = sampled_logits->data<T>();
-    for (int i = 0; i < sampled_logits->numel(); ++i) {
-      std::cout << sampled_logits_data[i] << ", ";
-      if ((i + 1) % num_sampled_classes == 0)
-        std::cout << std::endl;
-    }
-    std::cout << std::endl;
-    */
-    /* Debug
-    std::cout << "Samples" << std::endl;
-    const auto samples_data = samples->data<int64_t>();
-    for (int i = 0; i < samples->numel(); ++i) {
-      std::cout << samples_data[i] << ", ";
-      if ((i + 1) % num_sampled_classes == 0)
-        std::cout << std::endl;
-    }
-    std::cout << std::endl;
-    */
-    /* Debug
-    std::cout << "Probabilities" << std::endl;
-    const auto probabilities_data = probabilities->data<T>();
-    for (int i = 0; i < probabilities->numel(); ++i) {
-      std::cout << probabilities_data[i] << ", ";
-      if ((i + 1) % num_sampled_classes == 0)
-        std::cout << std::endl;
-    }
-    std::cout << std::endl;
-    */
     // subtracted sampled logits with logQ(y|x)
     auto probs = EigenMatrix<T>::From(*probabilities);
     auto smp_logits = EigenMatrix<T>::From(*sampled_logits);
...
```
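The surviving context line `// subtracted sampled logits with logQ(y|x)` names the standard sampled-softmax correction: each sampled logit has the log of its sampling probability subtracted so the truncated softmax stays consistent with the full one. The kernel does this through the Eigen views `probs` and `smp_logits`; elementwise, the operation is just the following sketch (flat layout and names are illustrative, not Paddle's actual kernel):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Subtract log Q(y|x) from each sampled logit, elementwise.
void SubtractLogQ(std::vector<float>& sampled_logits,         // N x (S+NT), flattened
                  const std::vector<float>& probabilities) {  // same shape, Q(y|x)
  for (std::size_t i = 0; i < sampled_logits.size(); ++i) {
    sampled_logits[i] -= std::log(probabilities[i]);
  }
}
```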
```diff
@@ -263,9 +232,6 @@ class SampleLogitsGradKernel : public framework::OpKernel<T> {
     math::SetConstant<platform::CPUDeviceContext, T> set_zero;
     set_zero(dev_ctx, logits_grad, static_cast<T>(0));
-    // const bool remove_accidental_hits =
-    //     context.Attr<bool>("remove_accidental_hits");
-
     // UNDERSTAND: scatter it back to logit_grad
     CPUPutAlongD1<T>(dev_ctx, logits_grad, *samples, *sampled_logits_grad);
   }
...
```
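The `// UNDERSTAND: scatter it back to logit_grad` step, after `set_zero`, writes each sampled-logit gradient into the full-vocabulary gradient at the column given by its sample id: a put-along-dim-1. A plain-array sketch (illustrative only; Paddle's actual `CPUPutAlongD1` may assign rather than accumulate):

```cpp
#include <cstdint>
#include <vector>

// Scatter sampled gradients [N x S] back into logits_grad [N x K], using
// sample ids as column indices. Accumulating with += is one reasonable
// choice when an id can appear more than once in a row.
void PutAlongD1(std::vector<float>& logits_grad, int num_classes,
                const std::vector<std::int64_t>& samples, int num_samples,
                const std::vector<float>& sampled_grad) {
  const int rows = static_cast<int>(samples.size()) / num_samples;
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < num_samples; ++c) {
      const std::int64_t cls = samples[r * num_samples + c];
      logits_grad[r * num_classes + cls] += sampled_grad[r * num_samples + c];
    }
  }
}
```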